From e00d5172308dd42f0b5e45bfa0190c76d4fe44ab Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Tue, 1 Oct 2024 22:56:04 +0300 Subject: [PATCH 01/81] Add files via upload Added assignment and store-into-other NLR tests for every activation function. --- Test_NetworkLevelReasoner.h | 3638 +++++++++++++++++++++++++++++++++++ 1 file changed, 3638 insertions(+) create mode 100644 Test_NetworkLevelReasoner.h diff --git a/Test_NetworkLevelReasoner.h b/Test_NetworkLevelReasoner.h new file mode 100644 index 0000000000..5e98d7fb3a --- /dev/null +++ b/Test_NetworkLevelReasoner.h @@ -0,0 +1,3638 @@ +/********************* */ +/*! \file Test_NetworkLevelReasoner.h + ** \verbatim + ** Top contributors (to current version): + ** Guy Katz, Andrew Wu + ** This file is part of the Marabou project. + ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS + ** in the top-level source directory) and their institutional affiliations. + ** All rights reserved. See the file COPYING in the top-level source + ** directory for licensing information.\endverbatim + ** + ** [[ Add lengthier description here ]] + +**/ + +#include "../../engine/tests/MockTableau.h" // TODO: fix this +#include "FloatUtils.h" +#include "Layer.h" +#include "NetworkLevelReasoner.h" +#include "Options.h" +#include "Query.h" +#include "Tightening.h" +#include "Vector.h" + +#include <cxxtest/TestSuite.h> + +class MockForNetworkLevelReasoner +{ +public: +}; + +class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite +{ +public: + MockForNetworkLevelReasoner *mock; + + void setUp() + { + TS_ASSERT( mock = new MockForNetworkLevelReasoner ); + } + + void tearDown() + { + TS_ASSERT_THROWS_NOTHING( delete mock ); + } + + void populateNetwork( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); +
nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithSigmoids( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SIGMOID, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Sigmoid sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithAbs( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + +
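// Weights and biases are identical to populateNetwork() above; only the + // activation type differs. Hand check for test_evaluate_abs, input (1,2): + // layer 1 = (2,-4,2) -> Abs -> (2,4,2); layer 3 = (4,2) -> Abs -> (4,2); + // outputs = (4, 4 + 3*2) = (4,10). +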
// Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithSign( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithRound( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ROUND, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, 
NLR::Layer::ROUND, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Round sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithLeakyRelu( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + nlr.getLayer( 4 )->setAlpha( 0.1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the LeakyReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + 
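// Note the interleaved variable numbering: each weighted-sum neuron's + // variable (2, 4, 6 and 8, 10) is paired with its activation output's + // variable (3, 5, 7 and 9, 11). +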
nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + + + void populateNetworkWithMax( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x b e + g + y c f + d + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::MAX, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::MAX, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, -2 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 2 ); + nlr.setWeight( 0, 1, 1, 3, -3 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 0, 2 ); + + // Mark the Max sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 3, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + } + + void populateNetworkWithSoftmax( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SOFTMAX, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 
1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Softmax sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 0, 4, 1 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithBilinear( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x b e + g + y c f + d + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::BILINEAR, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, -2 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 2 ); + nlr.setWeight( 0, 1, 1, 3, -3 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 0, 2 ); + + // Mark the Bilinear sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 3, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + } + + void populateNetworkWithAbsAndRelu( NLR::NetworkLevelReasoner &nlr ) + 
{ + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -5 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Abs/ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithRoundAndSign( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ROUND, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Round/Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 
0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithLeakyReluAndSigmoid( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the LeakyReLU/Sigmoid sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d + b f + y e + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::MAX, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + 
nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Softmax/Max sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + } + + + void populateNetworkWithReluAndBilinear( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d + b f + y e + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the ReLU/Bilinear sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + 
nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + } + + void test_evaluate_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetwork( nlr ); + + double input[2]; + double output[2]; + + // With ReLUs, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With ReLUs, case 1 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 1 ) ); + + // With ReLUs, case 2 + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 0 ) ); + } + + void test_evaluate_sigmoids() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSigmoids( nlr ); + + double input[2]; + double output[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.6750, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 3.0167, 0.0001 ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.6032, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.5790, 0.0001 ) ); + + // case 3 + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.5045, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.1957, 0.0001 ) ); + } + + void test_evaluate_abs() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithAbs( nlr ); + + double input[2]; + double output[2]; + + // With Abs, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Abs, case 1 + input[0] = -2; + input[1] = -2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Abs, case 2 + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 10 ) ); + } + + void test_evaluate_sign() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSign( nlr ); + + double input[2]; + double output[2]; + + // With Sign, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Sign, case 1 + input[0] = -2; + input[1] = -2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Sign, case 2 (0 considered "non-negative", sign(0)=1) + input[0] = -1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 
-4 ) ); + } + + void test_evaluate_round() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithRound( nlr ); + + double input[2]; + double output[2]; + + // With Round, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Round, case 1 + input[0] = 2.1; + input[1] = 1.4; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 2, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); + + // With Round, case 2 + input[0] = 2.1; + input[1] = 1.6; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -12, 0.0001 ) ); + } + + void test_evaluate_leaky_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithLeakyRelu( nlr ); + + double input[2]; + double output[2]; + + // With Leaky ReLU, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Leaky ReLU, case 1 (alpha=0.1) + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.9, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 0.57, 0.0001 ) ); + + // With Leaky ReLU, case 2 + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -0.04, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -0.76, 0.0001 ) ); + } + + void test_evaluate_max() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithMax( nlr ); + + double input[2]; + double output[1]; + + // With Max, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -3 ) ); + + // With Max, case 1 + input[0] = 1; + input[1] = -3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -18 ) ); + + // With Max, case 2 + input[0] = -3; + input[1] = 3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -5 ) ); + } + + + void test_evaluate_softmax() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSoftmax( nlr ); + + double input[2]; + double output[2]; + + // With Softmax, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.2999, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.4001, 0.0001 ) ); + + // With Softmax, case 1 + input[0] = 1; + input[1] = -3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.7615, 0.0001 ) ); + + // With Softmax, case 2 + input[0] = -3; + input[1] = 3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.1206, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.7588, 0.0001 ) ); + } + + void 
test_evaluate_bilinear() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithBilinear( nlr ); + + double input[2]; + double output[1]; + + // With Bilinear, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); + + // With Bilinear, case 1 + input[0] = 0.1; + input[1] = -0.3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 2.8304, 0.0001 ) ); + + // With Bilinear, case 2 + input[0] = -0.3; + input[1] = 0.3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.0912, 0.0001 ) ); + } + + void test_evaluate_non_consecutive_layers() + { + NLR::NetworkLevelReasoner nlr; + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + nlr.addLayerDependency( 0, 1 ); + nlr.addLayerDependency( 1, 2 ); + nlr.addLayerDependency( 2, 3 ); + nlr.addLayerDependency( 0, 3 ); + nlr.addLayerDependency( 3, 4 ); + nlr.addLayerDependency( 0, 4 ); + nlr.addLayerDependency( 4, 5 ); + + // Set the weights and ReLUs + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, 2 ); + nlr.setWeight( 2, 2, 3, 1, -2 ); + nlr.setWeight( 0, 1, 3, 1, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 0, 0, 4, 2 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 2, 5, 0, 1 ); + + // Evaluate + double input[2]; + double output; + + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); + TS_ASSERT( FloatUtils::areEqual( output, 2 ) ); + + input[0] = -1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); + TS_ASSERT( FloatUtils::areEqual( output, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.computeSuccessorLayers() ); + + TS_ASSERT_EQUALS( nlr.getLayer( 0 )->getSuccessorLayers(), Set<unsigned>( { 1, 3, 4 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 1 )->getSuccessorLayers(), Set<unsigned>( { 2 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 2 )->getSuccessorLayers(), Set<unsigned>( { 3 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 3 )->getSuccessorLayers(), Set<unsigned>( { 4 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getSuccessorLayers(), Set<unsigned>( { 5 } ) ); + } + + void test_evaluate_abs_and_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithAbsAndRelu( nlr ); + + double input[2]; + double output[2]; + + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); + + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + } + + void test_evaluate_round_and_sign() + { + NLR::NetworkLevelReasoner nlr; + +
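// populateNetworkWithRoundAndSign applies Round after layer 1 and Sign after + // layer 3. Hand check for input (1.6, 1.4): layer 1 = (2.6,-1,1.4) -> Round -> + // (3,-1,1); layer 3 = (1,-3) -> Sign -> (1,-1); outputs = (1, 1 + 3*(-1)) = (1,-2). +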
populateNetworkWithRoundAndSign( nlr ); + + double input[2]; + double output[2]; + + input[0] = 1.6; + input[1] = 1.4; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -2, 0.0001 ) ); + + input[0] = 1.6; + input[1] = 1.6; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -1, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); + } + + void test_evaluate_leaky_relu_and_sigmoid() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithLeakyReluAndSigmoid( nlr ); + + double input[2]; + double output[2]; + + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.7109, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 1.4602, 0.0001 ) ); + + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.4013, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 0.6508, 0.0001 ) ); + } + + void test_evaluate_softmax_and_max() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSoftmaxAndMax( nlr ); + + double input[2]; + double output[1]; + + input[0] = 1; + input[1] = -3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -2.9998, 0.0001 ) ); + + input[0] = -3; + input[1] = 3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -1.0000, 0.0001 ) ); + } + + void test_evaluate_relu_and_bilinear() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithReluAndBilinear( nlr ); + + double input[2]; + double output[1]; + + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); + } + + void test_store_into_other() + { + NLR::NetworkLevelReasoner nlr; + + populateNetwork( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + // With ReLUs, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // With ReLUs, case 1 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_sigmoids() + { + NLR::NetworkLevelReasoner nlr; + +
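// storeIntoOther() is expected to copy topology, weights, biases and + // activation metadata, so the copy must evaluate identically to the original. +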
populateNetworkWithSigmoids( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_round() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithRound( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_sign() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSign( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_abs() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithAbs( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_leaky_relu() + { + NLR::NetworkLevelReasoner nlr; + + 
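// The copy must also preserve the leaky ReLU slope (alpha = 0.1 here); + // otherwise the two evaluations below would diverge on negative pre-activations. +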
populateNetworkWithLeakyRelu( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_max() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithMax( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_softmax() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSoftmax( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_bilinear() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithBilinear( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_interval_arithmetic_bound_propagation_relu_constraints() + { + 
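// Interval arithmetic propagates the input box layer by layer. With + // x0, x1 in [-1,1]: x2 = x0 + 1 in [0,2]; x4 = 2*x0 - 3*x1 in [-5,5]; + // x6 = x1 in [-1,1]; ReLU lifts each lower bound to 0. Downstream, e.g. + // x8 = x3 + x5 - x7 in [0+0-1, 2+5-0] = [-1,7], and the output + // x13 = x9 + 3*x11 in [0, 7+21] = [0,28], matching expectedBounds below. +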
NLR::NetworkLevelReasoner nlr; + populateNetwork( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List<Tightening> expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + } ); + + List<Tightening> bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : expectedBounds ) + TS_ASSERT( bounds.exists( bound ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); +
tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List<Tightening> expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT_EQUALS( expectedBounds2.size(), bounds.size() ); + for ( const auto &bound : expectedBounds2 ) + TS_ASSERT( bounds.exists( bound ) ); + } + + void test_interval_arithmetic_bound_propagation_abs_constraints() + { + NLR::NetworkLevelReasoner nlr; + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( 
NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 8, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), + + Tightening( 10, -3, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 41, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : expectedBounds ) + TS_ASSERT( bounds.exists( bound ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + 
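+ // The absolute-value interval rule behind expectedBounds2 below (a sketch): for x in [l, u], + // |x| lies in [0, max(-l, u)] when l < 0 < u, and in [min(|l|, |u|), max(|l|, |u|)] otherwise. + // Here x4 = 2x0 - 3x1 spans [-12, 5], so x5 = |x4| spans [0, 12] rather than the ReLU-style [0, 5].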
+ tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List<Tightening> expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT_EQUALS( expectedBounds2.size(), bounds.size() ); + for ( const auto &bound : expectedBounds2 ) + TS_ASSERT( bounds.exists( bound ) ); + } + + void populateNetworkSBT( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + 2 R 1 + x0 --- x2 ---> x4 --- x6 + \ / / + 1 \ / / + \/ -1 / + /\ / + 3 / \ / + / \ R / + x1 --- x3 ---> x5 + 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 7 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + } + + void test_sbt_relus_all_active() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBT( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 : [11, 27] + x2.ub = 2x0 + 3x1 : [11, 27] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + Both ReLUs active, bounds survive through activations: + + x4.lb = 2x0 + 3x1 : [11, 27] + x4.ub = 2x0 + 3x1 : [11, 27] + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = x0 + 2x1 : [6, 16] + x6.ub = x0 + 2x1 : [6, 16] + */ + + List<Tightening> expectedBounds( { + Tightening( 2, 11, Tightening::LB ), + Tightening( 2, 27, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 11, Tightening::LB ), + Tightening( 4, 27, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, 6, Tightening::LB ), + Tightening( 6, 16, Tightening::UB ), + } ); + + List<Tightening> bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : expectedBounds ) + TS_ASSERT( bounds.exists( bound ) ); + } + + void test_sbt_relus_active_and_inactive() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBT( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 30 : [-19, -3] + x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is inactive, bounds get zeroed + Second ReLU is active, bounds survive the activation + + x4.lb = 0 + x4.ub = 0 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 : [-11, -5] + */ + + List<Tightening> expectedBounds( { + Tightening( 2, -19, Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), + } ); + + List<Tightening> bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : bounds ) + TS_ASSERT( expectedBounds.exists( bound ) ); + } + + void test_sbt_relus_active_and_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBT( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is undecided, bounds are concretized. + Coefficient: 12/(12-(-4)) = 12/16 = 0.75 + Second ReLU is active, bounds survive the activation + + x4 range: [0, 12] + x4.lb = 0.75( 2x0 + 3x1 ) - 0.75 * 15 = 1.5x0 + 2.25x1 - 11.25 + x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = 0.5x0 + 1.25x1 - 11.25 + x6.ub = 0.5x0 + 1.25x1 - 8.25 + + x6 range: [2 + 1.25 - 11.25 = -8, 3 + 6.25 - 8.25 = 1] = [-8, 1] + */ + + List<Tightening> expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, 1, Tightening::UB ), + } ); + + List<Tightening> bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : bounds ) + TS_ASSERT( expectedBounds.exists( bound ) ); + } + + void test_sbt_relus_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBT( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed.
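+ // (Note: eliminateVariable( 2, -3 ) below then overrides this, treating x2 as the + // constant -3, so the first ReLU ends up externally fixed inactive.)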
+ nlr.setBias( 1, 0, -15 ); + + // However, one of the ReLU's variables has been eliminated + nlr.eliminateVariable( 2, -3 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is inactive (set externally), bounds get zeroed + Second ReLU is active, bounds survive the activation + + x4.lb = 0 + x4.ub = 0 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 : [-11, -5] + */ + + List<Tightening> expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), + } ); + + List<Tightening> bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : bounds ) + TS_ASSERT( expectedBounds.exists( bound ) ); + } + + void test_sbt_abs_all_positive() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 : [11, 27] + x2.ub = 2x0 + 3x1 : [11, 27] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + Both absolute values positive, bounds survive through activations: + + x4.lb = 2x0 + 3x1 : [11, 27] + x4.ub = 2x0 + 3x1 : [11, 27] + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = x0 + 2x1 : [6, 16] + x6.ub = x0 + 2x1 : [6, 16] + */ + + List<Tightening> expectedBounds( { + Tightening( 2, 11, Tightening::LB ), + Tightening( 2, 27, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 11, Tightening::LB ), + Tightening( 4, 27, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, 6, Tightening::LB ), + Tightening( 6, 16, Tightening::UB ), + } ); + + List<Tightening> bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : bounds ) + TS_ASSERT( expectedBounds.exists( bound ) ); + } + + void test_sbt_abs_positive_and_negative() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 30 : [-19, -3] + x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First absolute value is negative, bounds get flipped + Second absolute value is positive, bounds survive the activation + + x4.lb = -2x0 -3x1 + 30 : [3, 19] + x4.ub = -2x0 -3x1 + 30 : [3, 19] + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - 3x0 - 4x1 + 30 : [-8, 14] + x6.ub = - 3x0 - 4x1 + 30 : [-8, 14] + */ + + List<Tightening> expectedBounds( { + Tightening( 2, -19, Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 19, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, 14, Tightening::UB ), + } ); + + List<Tightening> bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + + for ( const auto &bound : bounds ) + TS_ASSERT( expectedBounds.exists( bound ) ); + } + + void test_sbt_absolute_values_positive_and_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First absolute value is undecided, bounds are concretized.
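+ Concretization follows the abs interval rule: with x2 in [-4, 12], |x2| is boxed to [0, max(4, 12)] = [0, 12].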
+ Second absolute value is positive, bounds survive the activation + + x4 range: [0, 12] + x4.lb = 0 + x4.ub = 12 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 + 12 : [ 1, 7] + + x6 range: [-11, 7] + */ + + List<Tightening> expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, 7, Tightening::UB ), + } ); + + List<Tightening> bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : bounds ) + TS_ASSERT( expectedBounds.exists( bound ) ); + } + + void test_sbt_absolute_values_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed.
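+ // (Note: eliminateVariable( 2, -3 ) below pins x2 to the constant -3, so the first + // absolute value concretizes to |-3| = 3 instead of remaining unfixed.)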
+ nlr.setBias( 1, 0, -15 ); + + // However, the weighted sum variable has been eliminated + nlr.eliminateVariable( 2, -3 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2 is eliminated, everything set to -3 + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + Second absolute value is positive, bounds survive the activation + + x4: all set to 3 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 + 3 : [-8, -2] + x6.ub = - x0 - x1 + 3 : [-8, -2] + */ + + List<Tightening> expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, -2, Tightening::UB ), + } ); + + List<Tightening> bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + printf( "Dumping discovered bounds:\n" ); + for ( const auto &bound : bounds ) + bound.dump(); + + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : bounds ) + TS_ASSERT( expectedBounds.exists( bound ) ); + } + + void test_generate_input_query() + { + NLR::NetworkLevelReasoner nlr; + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Variable indexing + + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + + // Set the weights and biases for the weighted sum layers + + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -5 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 1, 1, 0 ); + nlr.setBias( 1, 2, 0 ); + + nlr.setBias( 3, 0, 0 ); + nlr.setBias( 3, 1, 2 ); + + nlr.setBias( 5, 0, 0 ); + nlr.setBias( 5, 1, 0 ); + + // Mark the ReLU/Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 );
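+ // (Each addActivationSource( sourceLayer, sourceNeuron, targetLayer, targetNeuron ) call + // records which weighted-sum neuron feeds which activation neuron; the round-trip assertions + // below check that generateQuery() preserves these edges along with the weights and biases.)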
+ nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Start the testing + Query ipq; + nlr.generateQuery( ipq ); + List<Equation> unhandledEquations; + Set<unsigned> varsInUnhandledConstraints; + TS_ASSERT( + ipq.constructNetworkLevelReasoner( unhandledEquations, varsInUnhandledConstraints ) ); + NLR::NetworkLevelReasoner *reconstructedNlr = ipq.getNetworkLevelReasoner(); + + double input[2]; + double output[2]; + + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); + + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + } + + void test_simulate_relus() + { + NLR::NetworkLevelReasoner nlr; + + populateNetwork( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With ReLUs, inputs are zeros, only biases count + Vector<Vector<double>> simulations1; + simulations1.append( Vector<double>( simulationSize, 0 ) ); + simulations1.append( Vector<double>( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With ReLUs, case 1 + Vector<Vector<double>> simulations2; + simulations2.append( Vector<double>( simulationSize, 1 ) ); + simulations2.append( Vector<double>( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 1 ) ); + } + + // With ReLUs, case 2 + Vector<Vector<double>> simulations3; + simulations3.append( Vector<double>( simulationSize, 1 ) ); + simulations3.append( Vector<double>( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 0 ) ); + } + } + + void test_simulate_sigmoids() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSigmoids( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // case 1 + Vector<Vector<double>> simulations1; + simulations1.append( Vector<double>( simulationSize, 0 ) ); + simulations1.append( Vector<double>( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.6750, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 3.0167, + 0.0001 ) ); + } + + // case 2 + Vector<Vector<double>> simulations2; + simulations2.append( Vector<double>( simulationSize, 1 ) ); + simulations2.append( Vector<double>( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.6032, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.5790, + 0.0001 ) ); + } + + // case 3 + Vector<Vector<double>> simulations3; + simulations3.append( Vector<double>( simulationSize, 1 ) ); + simulations3.append( Vector<double>( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.5045, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.1957, + 0.0001 ) ); + } + } + + void test_simulate_non_consecutive_layers() + { + NLR::NetworkLevelReasoner nlr; + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + nlr.addLayerDependency( 0, 1 ); + nlr.addLayerDependency( 1, 2 ); + nlr.addLayerDependency( 2, 3 ); + nlr.addLayerDependency( 0, 3 ); + nlr.addLayerDependency( 3, 4 ); + nlr.addLayerDependency( 0, 4 ); + nlr.addLayerDependency( 4, 5 ); + + // Set the weights and relus + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, 2 ); + nlr.setWeight( 2, 2, 3, 1, -2 ); + nlr.setWeight( 0, 1, 3, 1, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 0, 0, 4, 2 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 2, 5, 0, 1 ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // Simulate1 + Vector<Vector<double>> simulations1; + simulations1.append( Vector<double>( simulationSize, 1 ) ); + simulations1.append( Vector<double>( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 2 ) ); + + // Simulate2 + Vector<Vector<double>> simulations2; + simulations2.append( Vector<double>( simulationSize, -1 ) ); + simulations2.append( Vector<double>( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0 ) ); + } + + void test_simulate_relus_and_abs() + { + NLR::NetworkLevelReasoner nlr; + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -5 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the ReLU/Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // Simulate1 + Vector<Vector<double>> simulations1; + simulations1.append( Vector<double>( simulationSize, 1 ) ); + simulations1.append( Vector<double>( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 2 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2 ) ); + } + + // Simulate2 + Vector<Vector<double>> simulations2; + simulations2.append( Vector<double>( simulationSize, 1 ) ); + simulations2.append( Vector<double>( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 4 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + } + + void test_concretize_input_assignment() + { + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + + populateNetwork( nlr ); + + // With ReLUs, inputs are zeros, only biases count + tableau.nextValues[0] = 0; + tableau.nextValues[1] = 0; + + Map<unsigned, double> assignment; + + TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); + + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 4 ) ); + + TS_ASSERT( assignment.size() == 14 ); + TS_ASSERT( FloatUtils::areEqual( assignment[12], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( assignment[13], 4 ) ); + + // With ReLUs, case 1 + tableau.nextValues[0] = 1; + tableau.nextValues[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); + + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 1 ) ); + + TS_ASSERT( FloatUtils::areEqual( assignment[12], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( assignment[13], 1 ) ); + + // With ReLUs, case 2 + tableau.nextValues[0] = 1; + tableau.nextValues[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); + + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 0 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 0 ) ); + + TS_ASSERT( FloatUtils::areEqual( assignment[12], 0 ) ); + TS_ASSERT( FloatUtils::areEqual( assignment[13], 0 ) ); + } + + + void test_obtain_bound_from_ipq() + { + NLR::NetworkLevelReasoner nlr; + populateNetwork( nlr ); + + Query query; + query.setNumberOfVariables( 14 ); + + // Initialize the bounds + query.setLowerBound( 0, -1 ); + query.setUpperBound( 0, 1 ); + query.setLowerBound( 1, -1 ); + query.setUpperBound( 1, 1 ); + + double large = 1000; + query.setLowerBound( 2, -large ); + query.setUpperBound( 2, large ); + query.setLowerBound( 3, -large ); + query.setUpperBound( 3, large ); + query.setLowerBound( 4, -large ); + query.setUpperBound( 4, large ); + query.setLowerBound( 5, -large ); + query.setUpperBound( 5, large ); + query.setLowerBound( 6, -large ); + query.setUpperBound( 6, large ); + query.setLowerBound( 7, -large ); + query.setUpperBound( 7, large ); + query.setLowerBound( 8, -large ); + query.setUpperBound( 8, large ); + query.setLowerBound( 9, -large ); + query.setUpperBound( 9, large ); + query.setLowerBound( 10, -large ); + query.setUpperBound( 10, large ); + query.setLowerBound( 11, -large ); + query.setUpperBound( 11, large ); + query.setLowerBound( 12, -large ); + query.setUpperBound( 12, large ); + query.setLowerBound( 13, -large ); + query.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds( query ) ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List<Tightening> expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + } ); + + List<Tightening> bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : expectedBounds ) + TS_ASSERT( bounds.exists( bound ) ); + } +}; From 04cc0b19e373462221aeac8fadaca10cb2f92a27 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Tue, 1 Oct 2024 22:58:53 +0300 Subject: [PATCH 02/81] Revert last change. 
--- Test_NetworkLevelReasoner.h | 3638 ----------------------------------- 1 file changed, 3638 deletions(-) delete mode 100644 Test_NetworkLevelReasoner.h diff --git a/Test_NetworkLevelReasoner.h b/Test_NetworkLevelReasoner.h deleted file mode 100644 index 5e98d7fb3a..0000000000 --- a/Test_NetworkLevelReasoner.h +++ /dev/null @@ -1,3638 +0,0 @@ -/********************* */ -/*! \file Test_NetworkLevelReasoner.h - ** \verbatim - ** Top contributors (to current version): - ** Guy Katz, Andrew Wu - ** This file is part of the Marabou project. - ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS - ** in the top-level source directory) and their institutional affiliations. - ** All rights reserved. See the file COPYING in the top-level source - ** directory for licensing information.\endverbatim - ** - ** [[ Add lengthier description here ]] - -**/ - -#include "../../engine/tests/MockTableau.h" // TODO: fix this -#include "FloatUtils.h" -#include "Layer.h" -#include "NetworkLevelReasoner.h" -#include "Options.h" -#include "Query.h" -#include "Tightening.h" -#include "Vector.h" - -#include - -class MockForNetworkLevelReasoner -{ -public: -}; - -class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -{ -public: - MockForNetworkLevelReasoner *mock; - - void setUp() - { - TS_ASSERT( mock = new MockForNetworkLevelReasoner ); - } - - void tearDown() - { - TS_ASSERT_THROWS_NOTHING( delete mock ); - } - - void populateNetwork( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( 
NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithSigmoids( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::SIGMOID, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithAbs( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable 
indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithSign( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::SIGN, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithRound( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ROUND, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - 
nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Round sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithLeakyRelu( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - nlr.getLayer( 2 )->setAlpha( 0.1 ); - nlr.getLayer( 4 )->setAlpha( 0.1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the LeakyReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( 
NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - - - void populateNetworkWithMax( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x b e - g - y c f - d - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::MAX, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::MAX, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, -2 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 2 ); - nlr.setWeight( 0, 1, 1, 3, -3 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 0, 2 ); - - // Mark the Max sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 3, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - } - - void populateNetworkWithSoftmax( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SOFTMAX, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Softmax sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 0, 2, 1 ); - nlr.addActivationSource( 1, 0, 2, 2 ); - nlr.addActivationSource( 1, 1, 
2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 1, 2, 2 ); - nlr.addActivationSource( 1, 2, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 0, 4, 1 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithBilinear( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x b e - g - y c f - d - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::BILINEAR, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, -2 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 2 ); - nlr.setWeight( 0, 1, 1, 3, -3 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 0, 2 ); - - // Mark the Bilinear sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 3, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - } - - void populateNetworkWithAbsAndRelu( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, 
NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -5 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Abs/ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithRoundAndSign( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ROUND, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Round/Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 
0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithLeakyReluAndSigmoid( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - nlr.getLayer( 2 )->setAlpha( 0.1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the LeakyReLU/Sigmoid sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d - b f - y e - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::MAX, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 
1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Softmax/Max sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 0, 2, 1 ); - nlr.addActivationSource( 1, 0, 2, 2 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 1, 2, 2 ); - nlr.addActivationSource( 1, 2, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - } - - - void populateNetworkWithReluAndBilinear( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d - b f - y e - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the ReLU/Bilinear sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - } - - void test_evaluate_relu() - { - NLR::NetworkLevelReasoner nlr; - - populateNetwork( nlr ); - - double input[2]; - double output[2]; - - // With ReLUs, Inputs are zeros, only biases count - 
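// ( Worked check of the expected values below: with input ( 0, 0 ), layer 1 computes - // ( 1, 0, 0 ) from the biases alone; ReLU keeps these; layer 3 gives - // ( 1 + 0 - 0, -1 + 0 - 0 + 2 ) = ( 1, 1 ); ReLU keeps these; and the output layer - // gives ( 1, 1 + 3 * 1 ) = ( 1, 4 ). ) -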
input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - - // With ReLUs, case 1 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 1 ) ); - - // With ReLUs, case 2 - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 0 ) ); - } - - void test_evaluate_sigmoids() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSigmoids( nlr ); - - double input[2]; - double output[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0.6750, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 3.0167, 0.0001 ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0.6032, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.5790, 0.0001 ) ); - - // case 3 - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0.5045, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.1957, 0.0001 ) ); - } - - void test_evaluate_abs() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithAbs( nlr ); - - double input[2]; - double output[2]; - - // With Abs, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - - // With Abs, case 1 - input[0] = -2; - input[1] = -2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - - // With Abs, case 2 - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 10 ) ); - } - - void test_evaluate_sign() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSign( nlr ); - - double input[2]; - double output[2]; - - // With Sign, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - - // With Sign, case 1 - input[0] = -2; - input[1] = -2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - - // With Sign, case 2 (0 considered "non-negative", sign(0)=1) - input[0] = -1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], -1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -4 ) ); - } - - void test_evaluate_round() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithRound( nlr ); - - double input[2]; - double output[2]; - - // With Round, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - 
TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - - // With Round, case 1 - input[0] = 2.1; - input[1] = 1.4; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 2, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); - - // With Round, case 2 - input[0] = 2.1; - input[1] = 1.6; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -12, 0.0001 ) ); - } - - void test_evaluate_leaky_relu() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithLeakyRelu( nlr ); - - double input[2]; - double output[2]; - - // With Leaky ReLU, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - - // With Leaky ReLU, case 1 (alpha=0.1) - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0.9, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 0.57, 0.0001 ) ); - - // With Leaky ReLU, case 2 - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], -0.04, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -0.76, 0.0001 ) ); - } - - void test_evaluate_max() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithMax( nlr ); - - double input[2]; - double output[1]; - - // With Max, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], -3 ) ); - - // With Max, case 1 - input[0] = 1; - input[1] = -3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], -18 ) ); - - // With Max, case 2 - input[0] = -3; - input[1] = 3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], -5 ) ); - } - - - void test_evaluate_softmax() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSoftmax( nlr ); - - double input[2]; - double output[2]; - - // With Softmax, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.2999, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.4001, 0.0001 ) ); - - // With Softmax, case 1 - input[0] = 1; - input[1] = -3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.1192, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.7615, 0.0001 ) ); - - // With Softmax, case 2 - input[0] = -3; - input[1] = 3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.1206, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.7588, 0.0001 ) ); - } - - void test_evaluate_bilinear() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithBilinear( nlr ); - - double input[2]; - double output[1]; - - // With Bilinear, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( 
nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); - - // With Bilinear, case 1 - input[0] = 0.1; - input[1] = -0.3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 2.8304, 0.0001 ) ); - - // With Bilinear, case 2 - input[0] = -0.3; - input[1] = 0.3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.0912, 0.0001 ) ); - } - - void test_evaluate_non_consecutive_layers() - { - NLR::NetworkLevelReasoner nlr; - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - nlr.addLayerDependency( 0, 1 ); - nlr.addLayerDependency( 1, 2 ); - nlr.addLayerDependency( 2, 3 ); - nlr.addLayerDependency( 0, 3 ); - nlr.addLayerDependency( 3, 4 ); - nlr.addLayerDependency( 0, 4 ); - nlr.addLayerDependency( 4, 5 ); - - // Set the weights and mark the ReLU sources - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, 2 ); - nlr.setWeight( 2, 2, 3, 1, -2 ); - nlr.setWeight( 0, 1, 3, 1, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 0, 0, 4, 2 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 2, 5, 0, 1 ); - - // Evaluate - double input[2]; - double output; - - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); - TS_ASSERT( FloatUtils::areEqual( output, 2 ) ); - - input[0] = -1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); - TS_ASSERT( FloatUtils::areEqual( output, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.computeSuccessorLayers() ); - - TS_ASSERT_EQUALS( nlr.getLayer( 0 )->getSuccessorLayers(), Set<unsigned>( { 1, 3, 4 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 1 )->getSuccessorLayers(), Set<unsigned>( { 2 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 2 )->getSuccessorLayers(), Set<unsigned>( { 3 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 3 )->getSuccessorLayers(), Set<unsigned>( { 4 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getSuccessorLayers(), Set<unsigned>( { 5 } ) ); - } - - void test_evaluate_abs_and_relu() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithAbsAndRelu( nlr ); - - double input[2]; - double output[2]; - - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); - - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - } - - void test_evaluate_round_and_sign() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithRoundAndSign( nlr ); - - double input[2]; - double output[2]; - - input[0] = 1.6; - input[1] = 1.4; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 
-2, 0.0001 ) ); - - input[0] = 1.6; - input[1] = 1.6; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], -1, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); - } - - void test_evaluate_leaky_relu_and_sigmoid() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithLeakyReluAndSigmoid( nlr ); - - double input[2]; - double output[2]; - - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0.7109, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 1.4602, 0.0001 ) ); - - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0.4013, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 0.6508, 0.0001 ) ); - } - - void test_evaluate_softmax_and_max() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSoftmaxAndMax( nlr ); - - double input[2]; - double output[1]; - - input[0] = 1; - input[1] = -3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], -2.9998, 0.0001 ) ); - - input[0] = -3; - input[1] = 3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], -1.0000, 0.0001 ) ); - } - - void test_evaluate_relu_and_bilinear() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithReluAndBilinear( nlr ); - - double input[2]; - double output[1]; - - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); - } - - void test_store_into_other() - { - NLR::NetworkLevelReasoner nlr; - - populateNetwork( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - // With ReLUs, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // With ReLUs, case 1 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_sigmoids() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSigmoids( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, 
output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_round() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithRound( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_sign() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSign( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_abs() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithAbs( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_leaky_relu() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithLeakyRelu( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - 
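// The stored copy is expected to reproduce the original network's outputs exactly -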
TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_max() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithMax( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[1]; - double output2[1]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - } - - void test_store_into_other_with_softmax() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSoftmax( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_bilinear() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithBilinear( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[1]; - double output2[1]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - } - - void test_interval_arithmetic_bound_propagation_relu_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetwork( nlr ); - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - double large = 1000; - 
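// ( Note: the per-variable calls below could be collapsed into a loop, e.g.: - // for ( unsigned var = 2; var < 14; ++var ) - // { - // tableau.setLowerBound( var, -large ); - // tableau.setUpperBound( var, large ); - // } - // They are written out explicitly here to keep each variable visible. ) -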
tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List<Tightening> expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), - - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), - } ); - - List<Tightening> bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : expectedBounds ) - TS_ASSERT( bounds.exists( bound ) ); - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() 
); - - List<Tightening> expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), - - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT_EQUALS( expectedBounds2.size(), bounds.size() ); - for ( const auto &bound : expectedBounds2 ) - TS_ASSERT( bounds.exists( bound ) ); - } - - void test_interval_arithmetic_bound_propagation_abs_constraints() - { - NLR::NetworkLevelReasoner nlr; - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - - MockTableau 
tableau; - tableau.getBoundManager().initialize( 14 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 2 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List<Tightening> expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 8, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), - - Tightening( 10, -3, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), - - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 41, Tightening::UB ), - } ); - - List<Tightening> bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : expectedBounds ) - TS_ASSERT( bounds.exists( bound ) ); - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - 
tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List<Tightening> expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), - - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT_EQUALS( expectedBounds2.size(), bounds.size() ); - for ( const auto &bound : expectedBounds2 ) - TS_ASSERT( bounds.exists( bound ) ); - } - - void populateNetworkSBT( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - 2 R 1 - x0 --- x2 ---> x4 --- x6 - \ / / - 1 \ / / - \/ -1 / - /\ / - 3 / \ / - / \ R / - x1 --- x3 ---> x5 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::RELU, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 7 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - } - - void test_sbt_relus_all_active() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBT( nlr, tableau ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - 
tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 : [11, 27] - x2.ub = 2x0 + 3x1 : [11, 27] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - Both ReLUs are active; bounds survive through the activations: - - x4.lb = 2x0 + 3x1 : [11, 27] - x4.ub = 2x0 + 3x1 : [11, 27] - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = x0 + 2x1 : [6, 16] - x6.ub = x0 + 2x1 : [6, 16] - */ - - List<Tightening> expectedBounds( { - Tightening( 2, 11, Tightening::LB ), - Tightening( 2, 27, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 11, Tightening::LB ), - Tightening( 4, 27, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, 6, Tightening::LB ), - Tightening( 6, 16, Tightening::UB ), - } ); - - List<Tightening> bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : expectedBounds ) - TS_ASSERT( bounds.exists( bound ) ); - } - - void test_sbt_relus_active_and_inactive() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBT( nlr, tableau ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -30 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 30 : [-19, -3] - x2.ub = 2x0 + 3x1 - 30 : [-19, -3] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First ReLU is inactive, bounds get zeroed - Second ReLU is active, bounds survive the activation - - x4.lb = 0 - x4.ub = 0 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - x0 - x1 : [-11, -5] - x6.ub = - x0 - x1 : [-11, -5] - */ - - List<Tightening> expectedBounds( { - Tightening( 2, -19, Tightening::LB ), - Tightening( 2, -3, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 0, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -11, Tightening::LB ), - Tightening( 6, -5, Tightening::UB ), - } ); - - List<Tightening> bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); - } - - void test_sbt_relus_active_and_not_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBT( nlr, tableau ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -15 ); - - // Invoke SBT - 
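// ( For an undecided ReLU whose source ranges over [l, u] with l < 0 < u, SBT - // scales the symbolic lower bound by lambda = u / ( u - l ) and relaxes the - // symbolic upper bound to lambda * ( x - l ); the derivation below instantiates - // this with l = -4, u = 12, lambda = 0.75. ) -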
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First ReLU is undecided, so its symbolic bounds are relaxed. - Coefficient: 12/(12-(-4)) = 12/16 = 0.75 - Second ReLU is active, bounds survive the activation - - x4 range: [0, 12] - x4.lb = 0.75( 2x0 + 3x1 ) - 0.75 * 15 = 1.5x0 + 2.25x1 - 11.25 - x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = 0.5x0 + 1.25x1 - 11.25 - x6.ub = 0.5x0 + 1.25x1 - 8.25 - - x6 range: [2 + 1.25 - 11.25 = -8, 3 + 6.25 - 8.25 = 1] = [-8, 1] - */ - - List<Tightening> expectedBounds( { - Tightening( 2, -4, Tightening::LB ), - Tightening( 2, 12, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 12, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -8, Tightening::LB ), - Tightening( 6, 1, Tightening::UB ), - } ); - - List<Tightening> bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); - } - - void test_sbt_relus_active_and_externally_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBT( nlr, tableau ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0); this should leave the node undecided. 
- nlr.setBias( 1, 0, -15 ); - - // However, one of the ReLU's variables has been eliminated - nlr.eliminateVariable( 2, -3 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First ReLU is inactive (set externally), bounds get zeroed - Second ReLU is active, bounds survive the activation - - x4.lb = 0 - x4.ub = 0 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - x0 - x1 : [-11, -5] - x6.ub = - x0 - x1 : [-11, -5] - */ - - List<Tightening> expectedBounds( { - // x2 does not appear, because it has been eliminated - - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 0, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -11, Tightening::LB ), - Tightening( 6, -5, Tightening::UB ), - } ); - - List<Tightening> bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); - } - - void test_sbt_abs_all_positive() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 : [11, 27] - x2.ub =
2x0 + 3x1 : [11, 27] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - Both absolute values positive, bounds survive through activations: - - x4.lb = 2x0 + 3x1 : [11, 27] - x4.ub = 2x0 + 3x1 : [11, 27] - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = x0 + 2x1 : [6, 16] - x6.ub = x0 + 2x1 : [6, 16] - */ - - List<Tightening> expectedBounds( { - Tightening( 2, 11, Tightening::LB ), - Tightening( 2, 27, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 11, Tightening::LB ), - Tightening( 4, 27, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, 6, Tightening::LB ), - Tightening( 6, 16, Tightening::UB ), - } ); - - List<Tightening> bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); - } - - void test_sbt_abs_positive_and_negative() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -30 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 30 : [-19, -3] - x2.ub = 2x0 + 3x1 - 30 : [-19, -3] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First absolute value is negative, bounds get flipped - Second absolute value is positive, bounds survive the activation - - x4.lb = -2x0 -3x1 + 30 : [3, 19] - x4.ub = -2x0 -3x1 + 30 : [3,
19] - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - 3x0 - 4x1 + 30 : [-8, 14] - x6.ub = - 3x0 - 4x1 + 30 : [-8, 14] - */ - - List<Tightening> expectedBounds( { - Tightening( 2, -19, Tightening::LB ), - Tightening( 2, -3, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 3, Tightening::LB ), - Tightening( 4, 19, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -8, Tightening::LB ), - Tightening( 6, 14, Tightening::UB ), - } ); - - List<Tightening> bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); - } - - void test_sbt_absolute_values_positive_and_not_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -15 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First absolute value is undecided, bounds are concretized.
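(Editor's note: for x2 in [-4, 12] this means |x2| lies in [0, max( -l, u )] = [0, 12]; unlike the undecided-ReLU case above, no symbolic expression is kept, only the concrete interval, which is why x4's bounds below are plain constants.)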
- Second absolute value is positive, bounds survive the activation - - x4 range: [0, 12] - x4.lb = 0 - x4.ub = 12 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - x0 - x1 : [-11, -5] - x6.ub = - x0 - x1 + 12 : [ 1, 7] - - x6 range: [-11, 7] - */ - - List<Tightening> expectedBounds( { - Tightening( 2, -4, Tightening::LB ), - Tightening( 2, 12, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 12, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -11, Tightening::LB ), - Tightening( 6, 7, Tightening::UB ), - } ); - - List<Tightening> bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); - } - - void test_sbt_absolute_values_active_and_externally_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0). Should make the node unfixed.
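// (Editor's note: the eliminateVariable call below overrides this:
// x2 is fixed to the constant -3, so its absolute value x4 becomes
// the constant 3, as the expected bounds reflect.)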
- nlr.setBias( 1, 0, -15 ); - - // However, the weighted sum variable has been eliminated - nlr.eliminateVariable( 2, -3 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2 is eliminated, everything set to -3 - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - Second absolute value is positive, bounds survive the activation - - x4: all set to 3 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - x0 - x1 + 3 : [-8, -2] - x6.ub = - x0 - x1 + 3 : [-8, -2] - */ - - List<Tightening> expectedBounds( { - // x2 does not appear, because it has been eliminated - - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 3, Tightening::LB ), - Tightening( 4, 3, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -8, Tightening::LB ), - Tightening( 6, -2, Tightening::UB ), - } ); - - List<Tightening> bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - printf( "Dumping discovered bounds:\n" ); - for ( const auto &bound : bounds ) - bound.dump(); - - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); - } - - void test_generate_input_query() - { - NLR::NetworkLevelReasoner nlr; - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Variable indexing - - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - - // Set the weights and biases for the weighted sum layers - - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -5 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 1, 1, 0 ); - nlr.setBias( 1, 2, 0 ); - - nlr.setBias( 3, 0, 0 ); - nlr.setBias( 3, 1, 2 ); - - nlr.setBias( 5, 0, 0 ); - nlr.setBias( 5, 1, 0 ); - - // Mark the Abs/ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); -
nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Start the testing - Query ipq; - nlr.generateQuery( ipq ); - List<Equation> unhandledEquations; - Set<unsigned> varsInUnhandledConstraints; - TS_ASSERT( - ipq.constructNetworkLevelReasoner( unhandledEquations, varsInUnhandledConstraints ) ); - NLR::NetworkLevelReasoner *reconstructedNlr = ipq.getNetworkLevelReasoner(); - - double input[2]; - double output[2]; - - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); - - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - } - - void test_simulate_relus() - { - NLR::NetworkLevelReasoner nlr; - - populateNetwork( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With ReLUs, Inputs are zeros, only biases count - Vector<Vector<double>> simulations1; - simulations1.append( Vector<double>( simulationSize, 0 ) ); - simulations1.append( Vector<double>( simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } - - // With ReLUs, case 1 - Vector<Vector<double>> simulations2; - simulations2.append( Vector<double>( simulationSize, 1 ) ); - simulations2.append( Vector<double>( simulationSize, 1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 1 ) ); - } - - // With ReLUs, case 1 and 2 - Vector<Vector<double>> simulations3; - simulations3.append( Vector<double>( simulationSize, 1 ) ); - simulations3.append( Vector<double>( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 0 ) ); - } - } - - void test_simulate_sigmoids() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSigmoids( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // case 1 - Vector<Vector<double>> simulations1; - simulations1.append( Vector<double>( simulationSize, 0 ) ); - simulations1.append( Vector<double>( simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.6750, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1
)->getSimulations() ) ) - .get( 1 ) - .get( i ), - 3.0167, - 0.0001 ) ); - } - - // case 2 - Vector<Vector<double>> simulations2; - simulations2.append( Vector<double>( simulationSize, 1 ) ); - simulations2.append( Vector<double>( simulationSize, 1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.6032, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2.5790, - 0.0001 ) ); - } - - // case 3 - Vector<Vector<double>> simulations3; - simulations3.append( Vector<double>( simulationSize, 1 ) ); - simulations3.append( Vector<double>( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.5045, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2.1957, - 0.0001 ) ); - } - } - - void test_simulate_non_consecutive_layers() - { - NLR::NetworkLevelReasoner nlr; - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - nlr.addLayerDependency( 0, 1 ); - nlr.addLayerDependency( 1, 2 ); - nlr.addLayerDependency( 2, 3 ); - nlr.addLayerDependency( 0, 3 ); - nlr.addLayerDependency( 3, 4 ); - nlr.addLayerDependency( 0, 4 ); - nlr.addLayerDependency( 4, 5 ); - - // Set the weights and relus - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, 2 ); - nlr.setWeight( 2, 2, 3, 1, -2 ); - nlr.setWeight( 0, 1, 3, 1, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 0, 0, 4, 2 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 2, 5, 0, 1 ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // Simulate1 - Vector<Vector<double>> simulations1; - simulations1.append( Vector<double>( simulationSize, 1 ) ); - simulations1.append( Vector<double>( simulationSize, 1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 2 ) ); - - // Simulate2 - Vector<Vector<double>> simulations2; - simulations2.append( Vector<double>( simulationSize, -1 ) ); - simulations2.append( Vector<double>( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0 ) ); - } - - void test_simulate_relus_and_abs() - { - NLR::NetworkLevelReasoner nlr; - - //
Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -5 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Abs/ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // Simulate1 - Vector<Vector<double>> simulations1; - simulations1.append( Vector<double>( simulationSize, 1 ) ); - simulations1.append( Vector<double>( simulationSize, 1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 2 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2 ) ); - } - - // Simulate2 - Vector<Vector<double>> simulations2; - simulations2.append( Vector<double>( simulationSize, 1 ) ); - simulations2.append( Vector<double>( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 4 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } - } - - void test_concretize_input_assignment() - { - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - - populateNetwork( nlr ); - - // With ReLUs, Inputs are zeros, only biases count - tableau.nextValues[0] = 0; - tableau.nextValues[1] = 0; - - Map<unsigned, double> assignment; - - TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); - - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 4 ) ); - - TS_ASSERT( assignment.size() == 14 ); - TS_ASSERT( FloatUtils::areEqual( assignment[12], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( assignment[13], 4 ) ); - - // With ReLUs, case 1 - tableau.nextValues[0] = 1; - tableau.nextValues[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); - - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 1 ) ); - - TS_ASSERT( FloatUtils::areEqual( assignment[12], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( assignment[13], 1
) ); - - // With ReLUs, case 2 - tableau.nextValues[0] = 1; - tableau.nextValues[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); - - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 0 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 0 ) ); - - TS_ASSERT( FloatUtils::areEqual( assignment[12], 0 ) ); - TS_ASSERT( FloatUtils::areEqual( assignment[13], 0 ) ); - } - - - void test_obtain_bound_from_ipq() - { - NLR::NetworkLevelReasoner nlr; - populateNetwork( nlr ); - - Query query; - query.setNumberOfVariables( 14 ); - - - // Initialize the bounds - query.setLowerBound( 0, -1 ); - query.setUpperBound( 0, 1 ); - query.setLowerBound( 1, -1 ); - query.setUpperBound( 1, 1 ); - - double large = 1000; - query.setLowerBound( 2, -large ); - query.setUpperBound( 2, large ); - query.setLowerBound( 3, -large ); - query.setUpperBound( 3, large ); - query.setLowerBound( 4, -large ); - query.setUpperBound( 4, large ); - query.setLowerBound( 5, -large ); - query.setUpperBound( 5, large ); - query.setLowerBound( 6, -large ); - query.setUpperBound( 6, large ); - query.setLowerBound( 7, -large ); - query.setUpperBound( 7, large ); - query.setLowerBound( 8, -large ); - query.setUpperBound( 8, large ); - query.setLowerBound( 9, -large ); - query.setUpperBound( 9, large ); - query.setLowerBound( 10, -large ); - query.setUpperBound( 10, large ); - query.setLowerBound( 11, -large ); - query.setUpperBound( 11, large ); - query.setLowerBound( 12, -large ); - query.setUpperBound( 12, large ); - query.setLowerBound( 13, -large ); - query.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds( query ) ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List<Tightening> expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), - - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), - } ); - - List<Tightening> bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : expectedBounds ) - TS_ASSERT( bounds.exists( bound ) ); - } -}; From 80be6dc2f9b36113fec67121020124eb25428ea3 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Tue, 1 Oct 2024 23:01:33 +0300 Subject: [PATCH 03/81] Added assignment, store-into-other NLR tests for every activation function.
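Each activation function gets a populateNetworkWith<Activation> helper that builds the same small feed-forward topology, plus evaluate/simulate tests checked against hand-computed outputs. A minimal sketch of the shared evaluate-test pattern (the concrete values here are taken from the Abs variant in the diff below; any of the populateNetworkWith* helpers slots in the same way):

    NLR::NetworkLevelReasoner nlr;
    populateNetworkWithAbs( nlr );

    double input[2] = { 0, 0 }; // zero inputs, so only the biases count
    double output[2];

    TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
    TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) );
    TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) );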
--- src/nlr/tests/Test_NetworkLevelReasoner.h | 1720 +++++++++++++++++++-- 1 file changed, 1574 insertions(+), 146 deletions(-) diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h index 8afb4e0328..5e98d7fb3a 100644 --- a/src/nlr/tests/Test_NetworkLevelReasoner.h +++ b/src/nlr/tests/Test_NetworkLevelReasoner.h @@ -187,219 +187,1624 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); } + + void populateNetworkWithAbs( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithSign( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + 
nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithRound( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ROUND, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Round sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithLeakyRelu( NLR::NetworkLevelReasoner &nlr ) + { + /* 
+ a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + nlr.getLayer( 4 )->setAlpha( 0.1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the LeakyReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + + + void populateNetworkWithMax( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x b e + g + y c f + d + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::MAX, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::MAX, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, -2 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 2 ); + nlr.setWeight( 0, 1, 1, 3, -3 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 0, 2 ); + + // Mark the Max sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 3, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + 
nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + } + + void populateNetworkWithSoftmax( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SOFTMAX, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Softmax sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 0, 4, 1 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithBilinear( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x b e + g + y c f + d + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::BILINEAR, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark 
layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, -2 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 2 ); + nlr.setWeight( 0, 1, 1, 3, -3 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 0, 2 ); + + // Mark the Bilinear sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 3, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + } + + void populateNetworkWithAbsAndRelu( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -5 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Abs/ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable(
NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithRoundAndSign( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ROUND, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Round/Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithLeakyReluAndSigmoid( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the LeakyReLU/Sigmoid 
sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d + b f + y e + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::MAX, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Softmax/Max sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + } + + + void populateNetworkWithReluAndBilinear( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d + b f + y e + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + 
nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the ReLU/Bilinear sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + } + + void test_evaluate_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetwork( nlr ); + + double input[2]; + double output[2]; + + // With ReLUs, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With ReLUs, case 1 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 1 ) ); + + // With ReLUs, case 2 + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 0 ) ); + } + + void test_evaluate_sigmoids() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSigmoids( nlr ); + + double input[2]; + double output[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.6750, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 3.0167, 0.0001 ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.6032, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.5790, 0.0001 ) ); + + // case 3 + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.5045, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.1957, 0.0001 ) ); + } + + void test_evaluate_abs() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithAbs( nlr ); + + 
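// (Editor's note, a worked pass for the zero-input case below: the
// first weighted sum gives ( 1, 0, 0 ) thanks to the bias on node
// (1,0); Abs keeps ( 1, 0, 0 ); the second weighted sum gives
// ( 1, 1 ), using the +2 bias on node (3,1); Abs keeps ( 1, 1 );
// the output sums are then 1 and 1 + 3 * 1 = 4, matching the
// asserts below.)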
double input[2]; + double output[2]; + + // With Abs, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Abs, case 1 + input[0] = -2; + input[1] = -2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Abs, case 2 + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 10 ) ); + } + + void test_evaluate_sign() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSign( nlr ); + + double input[2]; + double output[2]; + + // With Sign, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Sign, case 1 + input[0] = -2; + input[1] = -2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Sign, case 2 (0 considered "non-negative", sign(0)=1) + input[0] = -1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -4 ) ); + } + + void test_evaluate_round() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithRound( nlr ); + + double input[2]; + double output[2]; + + // With Round, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Round, case 1 + input[0] = 2.1; + input[1] = 1.4; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 2, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); + + // With Round, case 2 + input[0] = 2.1; + input[1] = 1.6; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -12, 0.0001 ) ); + } + + void test_evaluate_leaky_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithLeakyRelu( nlr ); + + double input[2]; + double output[2]; + + // With Leaky ReLU, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Leaky ReLU, case 1 (alpha=0.1) + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.9, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 0.57, 0.0001 ) ); + + // With Leaky ReLU, case 2 + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -0.04, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -0.76, 0.0001 ) ); + } + + void test_evaluate_max() + { + 
NLR::NetworkLevelReasoner nlr; + + populateNetworkWithMax( nlr ); - void test_evaluate_relus() + double input[2]; + double output[1]; + + // With Max, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -3 ) ); + + // With Max, case 1 + input[0] = 1; + input[1] = -3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -18 ) ); + + // With Max, case 2 + input[0] = -3; + input[1] = 3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -5 ) ); + } + + + void test_evaluate_softmax() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSoftmax( nlr ); + + double input[2]; + double output[2]; + + // With Softmax, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.2999, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.4001, 0.0001 ) ); + + // With Softmax, case 1 + input[0] = 1; + input[1] = -3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.7615, 0.0001 ) ); + + // With Softmax, case 2 + input[0] = -3; + input[1] = 3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.1206, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.7588, 0.0001 ) ); + } + + void test_evaluate_bilinear() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithBilinear( nlr ); + + double input[2]; + double output[1]; + + // With Bilinear, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); + + // With Bilinear, case 1 + input[0] = 0.1; + input[1] = -0.3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 2.8304, 0.0001 ) ); + + // With Bilinear, case 2 + input[0] = -0.3; + input[1] = 0.3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.0912, 0.0001 ) ); + } + + void test_evaluate_non_consecutive_layers() + { + NLR::NetworkLevelReasoner nlr; + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + nlr.addLayerDependency( 0, 1 ); + nlr.addLayerDependency( 1, 2 ); + nlr.addLayerDependency( 2, 3 ); + nlr.addLayerDependency( 0, 3 ); + nlr.addLayerDependency( 3, 4 ); + nlr.addLayerDependency( 0, 4 ); + nlr.addLayerDependency( 4, 5 ); + + // Set the weights and relus + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, 2 ); + nlr.setWeight( 2, 2, 3, 1, -2 ); + nlr.setWeight( 0, 1, 3, 1, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + 
nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 0, 0, 4, 2 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 2, 5, 0, 1 ); + + // Evaluate + double input[2]; + double output; + + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); + TS_ASSERT( FloatUtils::areEqual( output, 2 ) ); + + input[0] = -1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); + TS_ASSERT( FloatUtils::areEqual( output, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.computeSuccessorLayers() ); + + TS_ASSERT_EQUALS( nlr.getLayer( 0 )->getSuccessorLayers(), Set( { 1, 3, 4 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 1 )->getSuccessorLayers(), Set( { 2 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 2 )->getSuccessorLayers(), Set( { 3 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 3 )->getSuccessorLayers(), Set( { 4 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getSuccessorLayers(), Set( { 5 } ) ); + } + + void test_evaluate_abs_and_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithAbsAndRelu( nlr ); + + double input[2]; + double output[2]; + + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); + + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + } + + void test_evaluate_round_and_sign() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithRoundAndSign( nlr ); + + double input[2]; + double output[2]; + + input[0] = 1.6; + input[1] = 1.4; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -2, 0.0001 ) ); + + input[0] = 1.6; + input[1] = 1.6; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -1, 0.0001) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); + } + + void test_evaluate_leaky_relu_and_sigmoid() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithLeakyReluAndSigmoid( nlr ); + + double input[2]; + double output[2]; + + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.7109, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 1.4602, 0.0001 ) ); + + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.4013, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 0.6508, 0.0001 ) ); + } + + void test_evaluate_softmax_and_max() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSoftmaxAndMax( nlr ); + + double input[2]; + double output[1]; + + input[0] = 1; + input[1] = -3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -2.9998, 0.0001 ) ); + + input[0] = -3; + input[1] = 3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -1.0000, 0.0001 ) ); + } + + void test_evaluate_relu_and_bilinear() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithReluAndBilinear( nlr ); + + double input[2]; + double output[1]; + + input[0] = 1; + input[1] = 1; + + 
TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); + } + + void test_store_into_other() + { + NLR::NetworkLevelReasoner nlr; + + populateNetwork( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + // With ReLUs, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // With ReLUs, case 1 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_sigmoids() { NLR::NetworkLevelReasoner nlr; - populateNetwork( nlr ); + populateNetworkWithSigmoids( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); double input[2]; - double output[2]; + double output1[2]; + double output2[2]; - // With ReLUs, Inputs are zeros, only biases count + // case 1 input[0] = 0; input[1] = 0; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - // With ReLUs, case 1 + // case 2 input[0] = 1; input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 1 ) ); - - // With ReLUs, case 2 - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 0 ) ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - - void test_evaluate_sigmoids() + + void test_store_into_other_with_round() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithSigmoids( nlr ); + populateNetworkWithRound( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); double input[2]; - double output[2]; + double output1[2]; + double output2[2]; // case 1 input[0] = 0; input[1] = 0; - TS_ASSERT_THROWS_NOTHING( 
nlr.evaluate( input, output ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.6750, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 3.0167, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); // case 2 input[0] = 1; input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0.6032, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.5790, 0.0001 ) ); - - // case 3 - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.5045, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.1957, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - - void test_evaluate_non_consecutive_layers() + + void test_store_into_other_with_sign() { NLR::NetworkLevelReasoner nlr; - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - nlr.addLayerDependency( 0, 1 ); - nlr.addLayerDependency( 1, 2 ); - nlr.addLayerDependency( 2, 3 ); - nlr.addLayerDependency( 0, 3 ); - nlr.addLayerDependency( 3, 4 ); - nlr.addLayerDependency( 0, 4 ); - nlr.addLayerDependency( 4, 5 ); + populateNetworkWithSign( nlr ); - // Set the weights and relus - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); + NLR::NetworkLevelReasoner nlr2; - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, 2 ); - nlr.setWeight( 2, 2, 3, 1, -2 ); - nlr.setWeight( 0, 1, 3, 1, 1 ); + double input[2]; + double output1[2]; + double output2[2]; - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 0, 0, 4, 2 ); + // case 1 + input[0] = 0; + input[1] = 0; - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 2, 5, 0, 1 ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - // Evaluate - double input[2]; - double output; + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + // case 2 input[0] = 1; input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); - TS_ASSERT( FloatUtils::areEqual( output, 2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - input[0] = -1; - input[1] = 2; + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_abs() + { + NLR::NetworkLevelReasoner nlr; - 
TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); - TS_ASSERT( FloatUtils::areEqual( output, 0 ) ); + populateNetworkWithAbs( nlr ); - TS_ASSERT_THROWS_NOTHING( nlr.computeSuccessorLayers() ); + NLR::NetworkLevelReasoner nlr2; - TS_ASSERT_EQUALS( nlr.getLayer( 0 )->getSuccessorLayers(), Set( { 1, 3, 4 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 1 )->getSuccessorLayers(), Set( { 2 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 2 )->getSuccessorLayers(), Set( { 3 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 3 )->getSuccessorLayers(), Set( { 4 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getSuccessorLayers(), Set( { 5 } ) ); - } + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - void test_evaluate_relus_and_abs() - { - NLR::NetworkLevelReasoner nlr; + double input[2]; + double output1[2]; + double output2[2]; - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + // case 1 + input[0] = 0; + input[1] = 0; - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -5 ); + // case 2 + input[0] = 1; + input[1] = 1; - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_leaky_relu() + { + NLR::NetworkLevelReasoner nlr; - // Mark the ReLU/Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); + populateNetworkWithLeakyRelu( nlr ); - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); double input[2]; - double output[2]; + double output1[2]; + double output2[2]; - input[0] = 1; - input[1] = 1; + // case 1 + input[0] = 0; + input[1] = 0; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + // case 2 input[0] = 1; - input[1] = 2; + input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + 
TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - - void test_store_into_other() + + void test_store_into_other_with_max() { NLR::NetworkLevelReasoner nlr; - populateNetwork( nlr ); + populateNetworkWithMax( nlr ); NLR::NetworkLevelReasoner nlr2; @@ -409,7 +1814,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite double output1[2]; double output2[2]; - // Inputs are zeros, only biases count + // case 1 input[0] = 0; input[1] = 0; @@ -419,9 +1824,32 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_softmax() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSoftmax( nlr ); + + NLR::NetworkLevelReasoner nlr2; + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - // With ReLUs, Inputs are zeros, only biases count + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 input[0] = 0; input[1] = 0; @@ -431,7 +1859,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - // With ReLUs, case 1 + // case 2 input[0] = 1; input[1] = 1; @@ -441,12 +1869,12 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - - void test_store_into_other_with_sigmoids() + + void test_store_into_other_with_bilinear() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithSigmoids( nlr ); + populateNetworkWithBilinear( nlr ); NLR::NetworkLevelReasoner nlr2; @@ -476,7 +1904,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - + void test_interval_arithmetic_bound_propagation_relu_constraints() { NLR::NetworkLevelReasoner nlr; From 9fbca7be1144040c6817e25564a7e8a723364306 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Tue, 1 Oct 2024 23:05:59 +0300 Subject: [PATCH 04/81] Add files via upload 0-2-0: Implementing layer simulations for every activation function. 
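Layer::computeSimulations() forward-propagates a whole batch of sampled input points at once: weighted-sum layers combine the simulation values of their sources linearly, and each activation layer applies its function pointwise to every sample of its source neurons. A minimal standalone sketch of the bilinear case added in this patch (plain std::vector instead of Marabou's Vector; the function name is illustrative, not part of the patch):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Sketch: a bilinear neuron's simulation stream is the pointwise product
    // of its two source neurons' simulation streams.
    std::vector<double> simulateBilinear( const std::vector<double> &a,
                                          const std::vector<double> &b )
    {
        std::vector<double> out( a.size() );
        for ( std::size_t j = 0; j < a.size(); ++j )
            out[j] = a[j] * b[j];
        return out;
    }

    int main()
    {
        std::vector<double> a = { 1, -2, 0.5 };
        std::vector<double> b = { 3, 4, -2 };
        for ( double v : simulateBilinear( a, b ) )
            printf( "%g\n", v ); // prints 3, -8, -1
        return 0;
    }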
---
 src/nlr/Layer.cpp | 57 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 57 insertions(+)

diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp
index 01b6a6aaf6..f7dd40b563 100644
--- a/src/nlr/Layer.cpp
+++ b/src/nlr/Layer.cpp
@@ -399,6 +399,63 @@ void Layer::computeSimulations()
                 _simulations[i][j] = 1 / ( 1 + std::exp( -simulations.get( j ) ) );
         }
     }
+    else if ( _type == ROUND )
+    {
+        for ( unsigned i = 0; i < _size; ++i )
+        {
+            NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin();
+            const Vector<double> &simulations =
+                ( *( _layerOwner->getLayer( sourceIndex._layer )->getSimulations() ) ).get( i );
+            for ( unsigned j = 0; j < simulationSize; ++j )
+                _simulations[i][j] = FloatUtils::round( simulations.get( j ) );
+        }
+    }
+    else if ( _type == SOFTMAX )
+    {
+        for ( unsigned i = 0; i < _size; ++i )
+        {
+            for ( unsigned j = 0; j < simulationSize; ++j )
+            {
+                _simulations[i][j] = FloatUtils::negativeInfinity();
+
+                Vector<double> inputs;
+                Vector<double> outputs;
+                unsigned outputIndex = 0;
+                unsigned index = 0;
+
+                for ( const auto &input : _neuronToActivationSources[i] )
+                {
+                    if ( input._neuron == i )
+                        outputIndex = index;
+                    double value = ( *( _layerOwner->getLayer( input._layer )->getSimulations() ) )
+                                       .get( input._neuron )
+                                       .get( j );
+                    inputs.append( value );
+                    ++index;
+                }
+
+                SoftmaxConstraint::softmax( inputs, outputs );
+                _simulations[i][j] = outputs[outputIndex];
+            }
+        }
+    }
+    else if ( _type == BILINEAR )
+    {
+        for ( unsigned i = 0; i < _size; ++i )
+        {
+            for ( unsigned j = 0; j < simulationSize; ++j )
+            {
+                _simulations[i][j] = 1;
+                for ( const auto &input : _neuronToActivationSources[i] )
+                {
+                    double value = ( *( _layerOwner->getLayer( input._layer )->getSimulations() ) )
+                                       .get( input._neuron )
+                                       .get( j );
+                    _simulations[i][j] *= value;
+                }
+            }
+        }
+    }
     else
     {
         printf( "Error! Neuron type %u unsupported\n", _type );

From f7aee6d5832f2e84892f5623b7abb22686bcb79d Mon Sep 17 00:00:00 2001
From: ido-shm-uel
Date: Tue, 1 Oct 2024 23:08:19 +0300
Subject: [PATCH 05/81] Add files via upload

0-2-1: Testing layer simulations for every activation function.
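The tests below all follow one pattern: seed every simulation column with the same concrete input point, run simulate(), and compare each column of the last layer's simulations against the known outputs. A sketch of that pattern as a helper inside the suite, assuming a 2-input, 2-output network (the helper name is illustrative and not part of the patch):

    void assertSimulationsMatchEvaluate( NLR::NetworkLevelReasoner &nlr, double x0, double x1 )
    {
        unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS );

        // Concrete evaluation of the same input point
        double input[2] = { x0, x1 };
        double output[2];
        nlr.evaluate( input, output );

        // Every simulation column carries the same constant input
        Vector<Vector<double>> simulations;
        simulations.append( Vector<double>( simulationSize, x0 ) );
        simulations.append( Vector<double>( simulationSize, x1 ) );
        nlr.simulate( &simulations );

        // Each column of the last layer must agree with evaluate()
        const NLR::Layer *lastLayer = nlr.getLayer( nlr.getNumberOfLayers() - 1 );
        for ( unsigned i = 0; i < simulationSize; ++i )
            for ( unsigned j = 0; j < 2; ++j )
                TS_ASSERT( FloatUtils::areEqual(
                    ( *( lastLayer->getSimulations() ) ).get( j ).get( i ), output[j] ) );
    }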
--- src/nlr/tests/Test_NetworkLevelReasoner.h | 722 ++++++++++++++++++++-- 1 file changed, 682 insertions(+), 40 deletions(-) diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h index 5e98d7fb3a..96e698c7ac 100644 --- a/src/nlr/tests/Test_NetworkLevelReasoner.h +++ b/src/nlr/tests/Test_NetworkLevelReasoner.h @@ -3198,7 +3198,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); } - void test_simulate_relus() + void test_simulate_relu() { NLR::NetworkLevelReasoner nlr; @@ -3347,6 +3347,496 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 0.0001 ) ); } } + + void test_simulate_round() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithRound( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Round, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Round, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 2.1 ) ); + simulations2.append( Vector( simulationSize, 1.4 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 2, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -4, + 0.0001 ) ); + } + + // With Round, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 2.1 ) ); + simulations3.append( Vector( simulationSize, 1.6 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -12, + 0.0001 ) ); + } + } + + void test_simulate_sign() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSign( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Sign, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Sign, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, -2 ) ); + 
simulations2.append( Vector( simulationSize, -2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Sign, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, -1 ) ); + simulations3.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -4 ) ); + } + } + + void test_simulate_abs() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithAbs( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Abs, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Abs, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, -2 ) ); + simulations2.append( Vector( simulationSize, -2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Abs, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 1 ) ); + simulations3.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 4 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 10 ) ); + } + } + + void test_simulate_leaky_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithLeakyRelu( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Leaky ReLU, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 
)->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Leaky ReLU, case 1 (alpha=0.1) + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.9, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 0.57, + 0.0001 ) ); + } + + // With Leaky ReLU, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 1 ) ); + simulations3.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -0.04, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -0.76, + 0.0001 ) ); + } + } + + void test_simulate_max() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithMax( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Max, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -3 ) ); + } + + // With Max, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, -3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -18 ) ); + } + + // With Max, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, -3 ) ); + simulations3.append( Vector( simulationSize, 3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -5 ) ); + } + } + + void test_simulate_softmax() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSoftmax( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Softmax, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.2999, + 
0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.4001, + 0.0001 ) ); + } + + // With Softmax, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, -3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.1192, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.7615, + 0.0001 ) ); + } + + // With Softmax, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, -3 ) ); + simulations3.append( Vector( simulationSize, 3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.1206, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.7588, + 0.0001 ) ); + } + } + + void test_simulate_bilinear() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithBilinear( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Bilinear, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0 ) ); + } + + // With Bilinear, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 0.1 ) ); + simulations2.append( Vector( simulationSize, -0.3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 2.8304, + 0.0001 ) ); + } + + // With Bilinear, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, -0.3 ) ); + simulations3.append( Vector( simulationSize, 0.3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.0912, + 0.0001 ) ); + } + } void test_simulate_non_consecutive_layers() { @@ -3423,53 +3913,117 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 0 ) ); } - void test_simulate_relus_and_abs() + void test_simulate_abs_and_relu() { NLR::NetworkLevelReasoner nlr; + + populateNetworkWithAbsAndRelu( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, 
NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + // Simulate1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 1 ) ); + simulations1.append( Vector( simulationSize, 1 ) ); - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 2 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2 ) ); + } - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -5 ); + // Simulate2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 2 ) ); - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 4 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + } + + void test_simulate_round_and_sign() + { + NLR::NetworkLevelReasoner nlr; - // Mark the ReLU/Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); + populateNetworkWithRoundAndSign( nlr ); - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Round/Sign, case 1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 1.6 ) ); + simulations1.append( Vector( simulationSize, 1.4 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -2 ) ); + } + + // With Round/Sign, case 2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1.6 ) ); + simulations2.append( Vector( simulationSize, 1.6 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -4 ) ); + } + } + + void test_simulate_leaky_relu_and_sigmoid() + { + NLR::NetworkLevelReasoner nlr; + + 
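+        // Mixed LeakyReLU/Sigmoid network; the expected values match the
+        // outputs asserted in test_evaluate_leaky_relu_and_sigmoid to four
+        // decimal places.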
populateNetworkWithLeakyReluAndSigmoid( nlr );
 
         unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS );
 
-        // Simulate1
+        // With LeakyReLU/Sigmoid, case 1
         Vector<Vector<double>> simulations1;
         simulations1.append( Vector<double>( simulationSize, 1 ) );
         simulations1.append( Vector<double>( simulationSize, 1 ) );
@@ -3482,15 +4036,17 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
                 ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
                     .get( 0 )
                     .get( i ),
-                2 ) );
+                0.7109,
+                0.0001 ) );
             TS_ASSERT( FloatUtils::areEqual(
                 ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
                     .get( 1 )
                     .get( i ),
-                2 ) );
+                1.4602,
+                0.0001 ) );
         }
 
-        // Simulate2
+        // With LeakyReLU/Sigmoid, case 2
         Vector<Vector<double>> simulations2;
         simulations2.append( Vector<double>( simulationSize, 1 ) );
         simulations2.append( Vector<double>( simulationSize, 2 ) );
@@ -3503,12 +4059,98 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
                 ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
                     .get( 0 )
                     .get( i ),
-                4 ) );
+                0.4013,
+                0.0001 ) );
             TS_ASSERT( FloatUtils::areEqual(
                 ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
                     .get( 1 )
                     .get( i ),
-                4 ) );
+                0.6508,
+                0.0001 ) );
+        }
+    }
+
+    void test_simulate_softmax_and_max()
+    {
+        NLR::NetworkLevelReasoner nlr;
+
+        populateNetworkWithSoftmaxAndMax( nlr );
+
+        unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS );
+
+        // With Softmax/Max, case 1
+        Vector<Vector<double>> simulations1;
+        simulations1.append( Vector<double>( simulationSize, 1 ) );
+        simulations1.append( Vector<double>( simulationSize, -3 ) );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) );
+
+        for ( unsigned i = 0; i < simulationSize; ++i )
+        {
+            TS_ASSERT( FloatUtils::areEqual(
+                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+                    .get( 0 )
+                    .get( i ),
+                -2.9998,
+                0.0001 ) );
+        }
+
+        // With Softmax/Max, case 2
+        Vector<Vector<double>> simulations2;
+        simulations2.append( Vector<double>( simulationSize, -3 ) );
+        simulations2.append( Vector<double>( simulationSize, 3 ) );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) );
+
+        for ( unsigned i = 0; i < simulationSize; ++i )
+        {
+            TS_ASSERT( FloatUtils::areEqual(
+                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+                    .get( 0 )
+                    .get( i ),
+                -1,
+                0.0001 ) );
+        }
+    }
+
+    void test_simulate_relu_and_bilinear()
+    {
+        NLR::NetworkLevelReasoner nlr;
+
+        populateNetworkWithReluAndBilinear( nlr );
+
+        unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS );
+
+        // With ReLU/Bilinear, case 1
+        Vector<Vector<double>> simulations1;
+        simulations1.append( Vector<double>( simulationSize, 1 ) );
+        simulations1.append( Vector<double>( simulationSize, 1 ) );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) );
+
+        for ( unsigned i = 0; i < simulationSize; ++i )
+        {
+            TS_ASSERT( FloatUtils::areEqual(
+                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+                    .get( 0 )
+                    .get( i ),
+                1 ) );
+        }
+
+        // With ReLU/Bilinear, case 2
+        Vector<Vector<double>> simulations2;
+        simulations2.append( Vector<double>( simulationSize, 1 ) );
+        simulations2.append( Vector<double>( simulationSize, 2 ) );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) );
+
+        for ( unsigned i = 0; i < simulationSize; ++i )
+        {
+            TS_ASSERT( FloatUtils::areEqual(
+                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+                    .get( 0 )
+                    .get( i ),
+                0 ) );
         }
     }
 
From 33f5d5f897dac7dfe4ab97980ebf77e7fc52a70b Mon Sep 17 00:00:00 2001
From: ido-shm-uel
Date: Tue, 1 Oct 2024 23:11:35 +0300
Subject: [PATCH 06/81] Add files via upload

0-3-0: Implementing interval arithmetic for all activation functions.
---
 src/nlr/Layer.cpp | 752 +++++++++++++++++++++++++++++++++++++++++++++-
 src/nlr/Layer.h   |  82 ++++-
 2 files changed, 809 insertions(+), 25 deletions(-)

diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp
index f7dd40b563..73f787595f 100644
--- a/src/nlr/Layer.cpp
+++ b/src/nlr/Layer.cpp
@@ -724,8 +724,30 @@ void Layer::computeIntervalArithmeticBounds()
         case SIGN:
             computeIntervalArithmeticBoundsForSign();
             break;
-
+
+        case ROUND:
+            computeIntervalArithmeticBoundsForRound();
+            break;
+
+        case LEAKY_RELU:
+            computeIntervalArithmeticBoundsForLeakyRelu();
+            break;
+
+        case SIGMOID:
+            computeIntervalArithmeticBoundsForSigmoid();
+            break;
+        case MAX:
+            computeIntervalArithmeticBoundsForMax();
+            break;
+
+        case SOFTMAX:
+            computeIntervalArithmeticBoundsForSoftmax();
+            break;
+
+        case BILINEAR:
+            computeIntervalArithmeticBoundsForBilinear();
+            break;
 
         default:
             printf( "Error! Activation type %u unsupported\n", _type );
@@ -779,13 +801,13 @@ void Layer::computeIntervalArithmeticBoundsForWeightedSum()
         if ( _eliminatedNeurons.exists( i ) )
             continue;
 
-        if ( newLb[i] > _lb[i] )
+        if ( _lb[i] < newLb[i] )
         {
             _lb[i] = newLb[i];
             _layerOwner->receiveTighterBound(
                 Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
         }
-        if ( newUb[i] < _ub[i] )
+        if ( _ub[i] > newUb[i] )
         {
             _ub[i] = newUb[i];
             _layerOwner->receiveTighterBound(
@@ -813,13 +835,13 @@ void Layer::computeIntervalArithmeticBoundsForRelu()
         if ( lb < 0 )
             lb = 0;
 
-        if ( lb > _lb[i] )
+        if ( _lb[i] < lb )
         {
             _lb[i] = lb;
             _layerOwner->receiveTighterBound(
                 Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
         }
-        if ( ub < _ub[i] )
+        if ( _ub[i] > ub )
         {
             _ub[i] = ub;
             _layerOwner->receiveTighterBound(
@@ -843,13 +865,13 @@ void Layer::computeIntervalArithmeticBoundsForAbs()
 
         if ( lb > 0 )
         {
-            if ( lb > _lb[i] )
+            if ( _lb[i] < lb )
             {
                 _lb[i] = lb;
                 _layerOwner->receiveTighterBound(
                     Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
             }
-            if ( ub < _ub[i] )
+            if ( _ub[i] > ub )
             {
                 _ub[i] = ub;
                 _layerOwner->receiveTighterBound(
@@ -858,13 +880,13 @@ void Layer::computeIntervalArithmeticBoundsForAbs()
         }
         else if ( ub < 0 )
         {
-            if ( -ub > _lb[i] )
+            if ( _lb[i] < -ub )
            {
                 _lb[i] = -ub;
                 _layerOwner->receiveTighterBound(
                     Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
             }
-            if ( -lb < _ub[i] )
+            if ( _ub[i] > -lb )
             {
                 _ub[i] = -lb;
                 _layerOwner->receiveTighterBound(
@@ -881,7 +903,7 @@ void Layer::computeIntervalArithmeticBoundsForAbs()
                     Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
             }
 
-            if ( FloatUtils::max( ub, -lb ) < _ub[i] )
+            if ( _ub[i] > FloatUtils::max( ub, -lb ) )
             {
                 _ub[i] = FloatUtils::max( ub, -lb );
                 _layerOwner->receiveTighterBound(
@@ -903,21 +925,408 @@ void Layer::computeIntervalArithmeticBoundsForSign()
         double lb = sourceLayer->getLb( sourceIndex._neuron );
         double ub = sourceLayer->getUb( sourceIndex._neuron );
+
+        double new_lb;
+        double new_ub;
 
         if ( !FloatUtils::isNegative( lb ) )
         {
-            _lb[i] = 1;
-            _ub[i] = 1;
+            new_lb = 1;
+            new_ub = 1;
         }
         else if ( FloatUtils::isNegative( ub ) )
         {
-            _lb[i] = -1;
-            _ub[i] = -1;
+            new_lb = -1;
+            new_ub = -1;
         }
         else
         {
-            _lb[i] = -1;
-            _ub[i] = 1;
+            new_lb = -1;
+            new_ub = 1;
+        }
+
+        /*
+          We now have the tightest bounds we can for the sign
+          variable. If they are tighter than what was previously
+          known, store them.
+ */ + if ( _lb[i] < new_lb ) + { + _lb[i] = new_lb; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > new_ub ) + { + _ub[i] = new_ub; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } +} + +void Layer::computeIntervalArithmeticBoundsForLeakyRelu() +{ + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + + double lb = sourceLayer->getLb( sourceIndex._neuron ); + double ub = sourceLayer->getUb( sourceIndex._neuron ); + + if ( lb > 0 ) + { + if ( _lb[i] < lb ) + { + _lb[i] = lb; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + if ( _ub[i] > ub ) + { + _ub[i] = ub; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } + else if ( ub < 0 ) + { + if ( _lb[i] < _alpha * lb ) + { + _lb[i] = _alpha * lb; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + if ( _ub[i] > _alpha * ub ) + { + _ub[i] = _alpha * ub; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } + else + { + // lb < 0 < ub + if ( _lb[i] < _alpha * lb ) + { + _lb[i] = _alpha * lb; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > ub ) + { + _ub[i] = ub; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } + } +} + +void Layer::computeIntervalArithmeticBoundsForSigmoid() +{ + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + + double lb = sourceLayer->getLb( sourceIndex._neuron ); + double ub = sourceLayer->getUb( sourceIndex._neuron ); + + double lbSigmoid = SigmoidConstraint::sigmoid( lb ); + double ubSigmoid = SigmoidConstraint::sigmoid( ub ); + + + if ( _lb[i] < lbSigmoid ) + { + _lb[i] = lbSigmoid; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + if ( _ub[i] > ubSigmoid ) + { + _ub[i] = ubSigmoid; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } +} + + +void Layer::computeIntervalArithmeticBoundsForRound() +{ + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + + double lb = sourceLayer->getLb( sourceIndex._neuron ); + double ub = sourceLayer->getUb( sourceIndex._neuron ); + + double lbRound = FloatUtils::round( lb ); + double ubRound = FloatUtils::round( ub ); + + + if ( _lb[i] < lbRound ) + { + _lb[i] = lbRound; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + if ( _ub[i] > ubRound ) + { + _ub[i] = ubRound; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } +} + + +void Layer::computeIntervalArithmeticBoundsForMax() +{ + for ( unsigned i = 0; i < _size; ++i ) + { + if ( 
_eliminatedNeurons.exists( i ) ) + continue; + + ASSERT( _neuronToActivationSources.exists( i ) ); + List sources = getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + + NeuronIndex indexOfMaxLowerBound = *( sources.begin() ); + double maxLowerBound = FloatUtils::negativeInfinity(); + double maxUpperBound = FloatUtils::negativeInfinity(); + + Map sourceLbs; + Map sourceUbs; + unsigned counter = 0; + for ( const auto &sourceIndex : sources ) + { + unsigned sourceNeuron = sourceIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + + sourceLbs[sourceIndex] = sourceLb; + sourceUbs[sourceIndex] = sourceUb; + + if ( maxLowerBound < sourceLb ) + { + indexOfMaxLowerBound = sourceIndex; + maxLowerBound = sourceLb; + } + if ( maxUpperBound < sourceUb ) + { + maxUpperBound = sourceUb; + } + ++counter; + } + + // The phase is fixed if the lower-bound of a source variable x_b is + // larger than the upper-bounds of the other source variables. + bool phaseFixed = true; + for ( const auto &sourceIndex : sources ) + { + if ( sourceIndex != indexOfMaxLowerBound && + FloatUtils::gt( sourceUbs[sourceIndex], maxLowerBound ) ) + { + phaseFixed = false; + break; + } + } + + if ( phaseFixed ) + { + // Phase fixed + // Concrete bound: lb_b <= x_f <= ub_b + if ( _lb[i] < maxLowerBound ) + { + _lb[i] = maxLowerBound; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > sourceUbs[indexOfMaxLowerBound] ) + { + _ub[i] = sourceUbs[indexOfMaxLowerBound]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } + else + { + // MaxPool not fixed + // Concrete bounds: lb_b <= x_f <= maxUpperBound + if ( _lb[i] < maxLowerBound ) + { + _lb[i] = maxLowerBound; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > maxUpperBound ) + { + _ub[i] = maxUpperBound; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } + } +} + +void Layer::computeIntervalArithmeticBoundsForSoftmax() +{ + Set handledInputNeurons; + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + ASSERT( _neuronToActivationSources.exists( i ) ); + List sources = getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + + ASSERT( sourceLayer->getSize() == _size ); + + Vector sourceLbs; + Vector sourceUbs; + for ( const auto &sourceIndex : sources ) + { + unsigned sourceNeuron = sourceIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + + sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + } + + // Find the index of i in the softmax + unsigned index = 0; + for ( const auto &sourceIndex : sources ) + { + if ( handledInputNeurons.exists( sourceIndex._neuron ) ) + ++index; + else + { + handledInputNeurons.insert( sourceIndex._neuron ); + break; + } + } + + double lb = softmaxLinearLowerBound( sourceLbs, sourceUbs, index ); + double ub = softmaxLinearUpperBound( sourceLbs, sourceUbs, index ); + if ( _lb[i] < lb ) + { + _lb[i] = lb; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], 
Tightening::LB ) ); + } + if ( _ub[i] > ub ) + { + _ub[i] = ub; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } +} + +void Layer::computeIntervalArithmeticBoundsForBilinear() +{ + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + ASSERT( _neuronToActivationSources.exists( i ) ); + List sources = getActivationSources( i ); + ASSERT( sources.size() == 2 ); + + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + + Vector sourceLbs; + Vector sourceUbs; + Vector sourceValues; + bool allConstant = true; + for ( const auto &sourceIndex : sources ) + { + unsigned sourceNeuron = sourceIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + + sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + + if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) + { + allConstant = false; + } + else + { + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + sourceValues.append( sourceValue ); + } + } + + if ( allConstant ) + { + // If the both source neurons have been eliminated, this neuron is constant + double value = sourceValues[0] * sourceValues[1]; + + if ( _lb[i] < value ) + { + _lb[i] = value; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > value ) + { + _ub[i] = value; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + continue; + } + + double lb = FloatUtils::infinity(); + double ub = FloatUtils::negativeInfinity(); + List values = { sourceLbs[0] * sourceLbs[1], + sourceLbs[0] * sourceUbs[1], + sourceUbs[0] * sourceLbs[1], + sourceUbs[0] * sourceUbs[1] }; + for ( const auto &v : values ) + { + if ( v < lb ) + lb = v; + if ( v > ub ) + ub = v; + } + + + if ( _lb[i] < lb ) + { + _lb[i] = lb; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > ub ) + { + _ub[i] = ub; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); } } } @@ -1680,6 +2089,317 @@ void Layer::computeSymbolicBoundsForWeightedSum() } } +double Layer::softmaxLSELowerBound( const Vector &inputs, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) +{ + double sum = 0; + for ( unsigned j = 0; j < inputs.size(); ++j ) + { + double lj = inputLbs[j]; + double uj = inputUbs[j]; + double xj = inputs[j]; + sum += + ( uj - xj ) / ( uj - lj ) * std::exp( lj ) + ( xj - lj ) / ( uj - lj ) * std::exp( uj ); + } + + return std::exp( inputs[i] ) / sum; +} + +double Layer::softmaxdLSELowerBound( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) +{ + double val = 0; + if ( i == di ) + val += softmaxLSELowerBound( inputMids, inputLbs, inputUbs, i ); + + double ldi = inputLbs[di]; + double udi = inputUbs[di]; + + double sum = 0; + for ( unsigned j = 0; j < inputMids.size(); ++j ) + { + double lj = inputLbs[j]; + double uj = inputUbs[j]; + double xj = inputMids[j]; + + sum += + ( uj - xj ) / ( uj - lj ) * std::exp( lj ) + ( xj - lj ) / ( uj - lj ) * std::exp( uj ); + } + + val -= std::exp( inputMids[i] ) / ( sum * sum ) * ( std::exp( udi ) - std::exp( ldi ) ) / + ( udi - ldi ); + + return val; +} + 
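The LSE bounds in this family all rest on a single inequality: exp is convex, so on [l_j, u_j] it lies below its chord. In the notation of the code above, a sketch of the derivation (nothing beyond what the code computes):

    e^{x_j} \le \frac{( u_j - x_j )\, e^{l_j} + ( x_j - l_j )\, e^{u_j}}{u_j - l_j},
    \qquad x_j \in [ l_j, u_j ].

Substituting the chord for every term of the softmax denominator can only enlarge it, hence

    \sigma_i( x ) = \frac{e^{x_i}}{\sum_j e^{x_j}}
    \ge \frac{e^{x_i}}{\sum_j \frac{( u_j - x_j )\, e^{l_j} + ( x_j - l_j )\, e^{u_j}}{u_j - l_j}},

which is precisely the value softmaxLSELowerBound returns at the evaluation point. The *2 variants that follow apply the same bound after subtracting the largest midpoint input; softmax is shift invariant, and the shift keeps the exponentials well conditioned.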
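The interval-arithmetic helpers earlier in this file all follow the same shape: derive a candidate [lb, ub] for the neuron from the source intervals, then tighten monotonically. A minimal self-contained sketch of the three nontrivial rules, the LeakyReLU case split, the Max phase test, and the Bilinear corner products; the Interval type and function names here are illustrative stand-ins, not Marabou API:

// Standalone sketch of the interval rules, with the Layer/Tightening
// plumbing stripped away.
#include <algorithm>
#include <cassert>
#include <vector>

struct Interval
{
    double lb;
    double ub;
};

// LeakyReLU: identity when the source is provably positive, scaled by
// alpha when provably negative; the mixed case scales only the lower bound.
Interval leakyReluInterval( Interval x, double alpha )
{
    if ( x.lb > 0 )
        return x;
    if ( x.ub < 0 )
        return { alpha * x.lb, alpha * x.ub };
    return { alpha * x.lb, x.ub };
}

// Max: the lower bound is the best source lower bound. The phase is fixed
// when that source's lb beats every rival's ub, in which case its own ub
// is tight; otherwise fall back to the largest ub overall.
Interval maxInterval( const std::vector<Interval> &xs )
{
    unsigned argMaxLb = 0;
    double maxUb = xs[0].ub;
    for ( unsigned j = 1; j < xs.size(); ++j )
    {
        if ( xs[j].lb > xs[argMaxLb].lb )
            argMaxLb = j;
        maxUb = std::max( maxUb, xs[j].ub );
    }

    bool phaseFixed = true;
    for ( unsigned j = 0; j < xs.size(); ++j )
    {
        if ( j != argMaxLb && xs[j].ub > xs[argMaxLb].lb )
            phaseFixed = false;
    }

    return { xs[argMaxLb].lb, phaseFixed ? xs[argMaxLb].ub : maxUb };
}

// Bilinear: x * y over a box attains its extrema at the four corners.
Interval bilinearInterval( Interval x, Interval y )
{
    double c[4] = { x.lb * y.lb, x.lb * y.ub, x.ub * y.lb, x.ub * y.ub };
    return { *std::min_element( c, c + 4 ), *std::max_element( c, c + 4 ) };
}

int main()
{
    Interval r = leakyReluInterval( { -2, 3 }, 0.1 );
    assert( r.lb == -0.2 && r.ub == 3 );

    // Phase fixed: the first source's lb (1) beats the other's ub (0.5)
    Interval m = maxInterval( { { 1, 2 }, { -1, 0.5 } } );
    assert( m.lb == 1 && m.ub == 2 );

    Interval b = bilinearInterval( { -1, 2 }, { -3, 1 } );
    assert( b.lb == -6 && b.ub == 3 );
    return 0;
}

The Max rule mirrors the phaseFixed test above: once one source's lower bound dominates every rival's upper bound, that source alone determines the output interval.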
+double Layer::softmaxLSELowerBound2( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) +{ + double max = FloatUtils::negativeInfinity(); + unsigned maxInputIndex = 0; + unsigned index = 0; + for ( const auto &mid : inputMids ) + { + if ( mid > max ) + { + max = mid; + maxInputIndex = index; + } + ++index; + } + + if ( maxInputIndex == i ) + return softmaxERLowerBound( inputMids, inputLbs, inputUbs, i ); + else + { + double sum = 0; + for ( unsigned j = 0; j < inputMids.size(); ++j ) + { + if ( j == maxInputIndex ) + sum += 1; + else + { + double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; + double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; + double xjjstar = inputMids[j] - inputMids[maxInputIndex]; + + sum += ( ujjstar - xjjstar ) / ( ujjstar - ljjstar ) * std::exp( ljjstar ) + + ( xjjstar - ljjstar ) / ( ujjstar - ljjstar ) * std::exp( ujjstar ); + } + } + + return std::exp( inputMids[i] - inputMids[maxInputIndex] ) / sum; + } +} + +double Layer::softmaxdLSELowerBound2( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) +{ + double max = FloatUtils::negativeInfinity(); + unsigned maxInputIndex = 0; + unsigned index = 0; + for ( const auto &mid : inputMids ) + { + if ( mid > max ) + { + max = mid; + maxInputIndex = index; + } + ++index; + } + + if ( maxInputIndex == i ) + return softmaxdERLowerBound( inputMids, inputLbs, inputUbs, i, di ); + else + { + double val = softmaxLSELowerBound2( inputMids, inputLbs, inputUbs, i ); + + double sum = 0; + for ( unsigned j = 0; j < inputMids.size(); ++j ) + { + if ( j == maxInputIndex ) + sum += 1; + else + { + double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; + double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; + double xjjstar = inputMids[j] - inputMids[maxInputIndex]; + sum += ( ujjstar - xjjstar ) / ( ujjstar - ljjstar ) * std::exp( ljjstar ) + + ( xjjstar - ljjstar ) / ( ujjstar - ljjstar ) * std::exp( ujjstar ); + } + } + double val2 = std::exp( inputMids[i] - inputMids[maxInputIndex] ) / ( sum * sum ); + + if ( i == di ) + { + double ldijstar = inputLbs[i] - inputUbs[maxInputIndex]; + double udijstar = inputUbs[i] - inputLbs[maxInputIndex]; + return val - + val2 * ( std::exp( udijstar ) - std::exp( ldijstar ) ) / ( udijstar - ldijstar ); + } + else if ( maxInputIndex == di ) + { + double sum2 = 0; + for ( unsigned j = 0; j < inputMids.size(); ++j ) + { + if ( j == maxInputIndex ) + continue; + else + { + double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; + double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; + sum2 += ( std::exp( ujjstar ) - std::exp( ljjstar ) ) / ( ujjstar - ljjstar ); + } + } + return -val + val2 * sum2; + } + else + { + double ldijstar = inputLbs[di] - inputUbs[maxInputIndex]; + double udijstar = inputUbs[di] - inputLbs[maxInputIndex]; + return -val2 * ( std::exp( udijstar ) - std::exp( ldijstar ) ) / + ( udijstar - ldijstar ); + } + } +} + +double Layer::softmaxLSEUpperBound( const Vector &inputs, + const Vector &outputLb, + const Vector &outputUb, + unsigned i ) +{ + double li = outputLb[i]; + double ui = outputUb[i]; + + Vector inputTilda; + SoftmaxConstraint::xTilda( inputs, inputs[i], inputTilda ); + + return ( ( li * std::log( ui ) - ui * std::log( li ) ) / ( std::log( ui ) - std::log( li ) ) - + ( ui - li ) / ( std::log( ui ) - std::log( li ) ) * + SoftmaxConstraint::logSumOfExponential( inputTilda ) ); +} + +double Layer::softmaxdLSEUpperbound( const Vector &inputMids, + const Vector 
&outputLb, + const Vector &outputUb, + unsigned i, + unsigned di ) +{ + double li = outputLb[i]; + double ui = outputUb[i]; + + double val = -( ui - li ) / ( std::log( ui ) - std::log( li ) ); + + double val2 = std::exp( inputMids[di] ) / SoftmaxConstraint::sumOfExponential( inputMids ); + if ( i == di ) + val2 -= 1; + + return val * val2; +} + +double Layer::softmaxERLowerBound( const Vector &inputs, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) +{ + Vector inputTilda; + SoftmaxConstraint::xTilda( inputs, inputs[i], inputTilda ); + + double sum = 0; + for ( unsigned j = 0; j < inputs.size(); ++j ) + { + if ( i == j ) + sum += 1; + else + { + double ljTilda = inputLbs[j] - inputUbs[i]; + double ujTilda = inputUbs[j] - inputLbs[i]; + double xjTilda = inputTilda[j]; + + sum += ( ujTilda - xjTilda ) / ( ujTilda - ljTilda ) * std::exp( ljTilda ) + + ( xjTilda - ljTilda ) / ( ujTilda - ljTilda ) * std::exp( ujTilda ); + } + } + + return 1 / sum; +} + +double Layer::softmaxdERLowerBound( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) +{ + double val = softmaxERLowerBound( inputMids, inputLbs, inputUbs, i ); + + if ( i != di ) + { + double ldiTilda = inputLbs[di] - inputUbs[i]; + double udiTilda = inputUbs[di] - inputLbs[i]; + return -val * val * ( std::exp( udiTilda ) - std::exp( ldiTilda ) ) / + ( udiTilda - ldiTilda ); + } + else + { + double val2 = 0; + for ( unsigned j = 0; j < inputMids.size(); ++j ) + { + if ( j != i ) + { + double ljTilda = inputLbs[j] - inputUbs[i]; + double ujTilda = inputUbs[j] - inputLbs[i]; + val2 += ( std::exp( ujTilda ) - std::exp( ljTilda ) ) / ( ujTilda - ljTilda ); + } + } + return val * val * val2; + } +} + +double Layer::softmaxERUpperBound( const Vector &inputs, + const Vector &outputLb, + const Vector &outputUb, + unsigned i ) +{ + double li = outputLb[i]; + double ui = outputUb[i]; + + Vector inputTilda; + SoftmaxConstraint::xTilda( inputs, inputs[i], inputTilda ); + + return ui + li - ui * li * SoftmaxConstraint::sumOfExponential( inputTilda ); +} + +double Layer::softmaxdERUpperBound( const Vector &inputMids, + const Vector &outputLb, + const Vector &outputUb, + unsigned i, + unsigned di ) +{ + double li = outputLb[i]; + double ui = outputUb[i]; + + + if ( i == di ) + { + double val2 = -1; + for ( unsigned j = 0; j < inputMids.size(); ++j ) + val2 += std::exp( inputMids[j] - inputMids[i] ); + return li * ui * val2; + } + else + return -li * ui * std::exp( inputMids[di] - inputMids[i] ); +} + +double Layer::softmaxLinearLowerBound( const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) +{ + Vector uTilda; + SoftmaxConstraint::xTilda( inputUbs, inputLbs[i], uTilda ); + uTilda[i] = 0; + return 1 / SoftmaxConstraint::sumOfExponential( uTilda ); +} + +double Layer::softmaxLinearUpperBound( const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) +{ + Vector lTilda; + SoftmaxConstraint::xTilda( inputLbs, inputUbs[i], lTilda ); + lTilda[i] = 0; + return 1 / SoftmaxConstraint::sumOfExponential( lTilda ); +} + void Layer::eliminateVariable( unsigned variable, double value ) { if ( !_variableToNeuron.exists( variable ) ) diff --git a/src/nlr/Layer.h b/src/nlr/Layer.h index e52ea01038..ceb16b3d68 100644 --- a/src/nlr/Layer.h +++ b/src/nlr/Layer.h @@ -138,7 +138,7 @@ class Layer void obtainCurrentBounds(); void computeSymbolicBounds(); void computeIntervalArithmeticBounds(); - + /* Preprocessing functionality: variable elimination and reindexing */ @@ -207,16 +207,74 @@ 
class Layer void allocateMemory(); void freeMemoryIfNeeded(); - + /* - Helper functions for symbolic bound tightening + The following methods compute concrete softmax output bounds + using different linear approximation, as well as the coefficients + of softmax inputs in the symbolic bounds */ - void comptueSymbolicBoundsForInput(); - void computeSymbolicBoundsForRelu(); - void computeSymbolicBoundsForSign(); - void computeSymbolicBoundsForAbsoluteValue(); - void computeSymbolicBoundsForWeightedSum(); - void computeSymbolicBoundsDefault(); + double softmaxLSELowerBound( const Vector &inputs, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ); + + double softmaxdLSELowerBound( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ); + + double softmaxLSELowerBound2( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ); + + double softmaxdLSELowerBound2( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ); + + double softmaxLSEUpperBound( const Vector &inputs, + const Vector &outputLb, + const Vector &outputUb, + unsigned i ); + + double softmaxdLSEUpperbound( const Vector &inputMids, + const Vector &outputLb, + const Vector &outputUb, + unsigned i, + unsigned di ); + + double softmaxERLowerBound( const Vector &inputs, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ); + + double softmaxdERLowerBound( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ); + + double softmaxERUpperBound( const Vector &inputs, + const Vector &outputLb, + const Vector &outputUb, + unsigned i ); + + double softmaxdERUpperBound( const Vector &inputMids, + const Vector &outputLb, + const Vector &outputUb, + unsigned i, + unsigned di ); + + double softmaxLinearLowerBound( const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ); + + double softmaxLinearUpperBound( const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ); /* Helper functions for interval bound tightening @@ -225,6 +283,12 @@ class Layer void computeIntervalArithmeticBoundsForRelu(); void computeIntervalArithmeticBoundsForAbs(); void computeIntervalArithmeticBoundsForSign(); + void computeIntervalArithmeticBoundsForMax(); + void computeIntervalArithmeticBoundsForLeakyRelu(); + void computeIntervalArithmeticBoundsForSigmoid(); + void computeIntervalArithmeticBoundsForRound(); + void computeIntervalArithmeticBoundsForSoftmax(); + void computeIntervalArithmeticBoundsForBilinear(); const double *getSymbolicLb() const; const double *getSymbolicUb() const; From 6d1c898922aef6c34c6c4cb5e7d809b5a593ddfe Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Tue, 1 Oct 2024 23:13:47 +0300 Subject: [PATCH 07/81] Add files via upload 0-3-1: Adding interval arithmetic tests for all activation functions. 
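One note before the test diff: computeIntervalArithmeticBoundsForSoftmax concretizes through softmaxLinearLowerBound and softmaxLinearUpperBound, declared above, and their closed forms are short. Output i of softmax is smallest when x_i sits at its lower bound and every rival at its upper bound, and largest at the opposite corner, so both bounds are attained. A self-contained sketch with std::vector standing in for Marabou's Vector; the function names are illustrative, not project API:

#include <cassert>
#include <cmath>
#include <vector>

// Tightest constant lower bound on softmax output i over the input box:
// evaluate at the worst corner, x_i = lb[i] and x_j = ub[j] for j != i.
double softmaxOutputLb( const std::vector<double> &lb, const std::vector<double> &ub, unsigned i )
{
    double denom = 1.0; // the j == i term contributes e^0 = 1
    for ( unsigned j = 0; j < lb.size(); ++j )
        if ( j != i )
            denom += std::exp( ub[j] - lb[i] );
    return 1.0 / denom;
}

// Tightest constant upper bound: the opposite corner.
double softmaxOutputUb( const std::vector<double> &lb, const std::vector<double> &ub, unsigned i )
{
    double denom = 1.0;
    for ( unsigned j = 0; j < lb.size(); ++j )
        if ( j != i )
            denom += std::exp( lb[j] - ub[i] );
    return 1.0 / denom;
}

int main()
{
    // Two logits, both constrained to [0, 0]: softmax must be exactly 0.5.
    std::vector<double> lb = { 0, 0 }, ub = { 0, 0 };
    assert( softmaxOutputLb( lb, ub, 0 ) == 0.5 );
    assert( softmaxOutputUb( lb, ub, 0 ) == 0.5 );

    // Widening the rival logit to [0, 1] drags output 0's lower bound down,
    // while its upper bound (rival at its own lb) is unchanged.
    ub[1] = 1;
    assert( softmaxOutputLb( lb, ub, 0 ) < 0.5 );
    assert( softmaxOutputUb( lb, ub, 0 ) == 0.5 );
    return 0;
}

The production code additionally pads the source bounds with a small epsilon before exponentiating, so the tested values can be marginally looser than these exact corner evaluations.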
--- src/nlr/tests/Test_NetworkLevelReasoner.h | 3999 ++++++++++++++------- 1 file changed, 2775 insertions(+), 1224 deletions(-) diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h index 96e698c7ac..33554ba680 100644 --- a/src/nlr/tests/Test_NetworkLevelReasoner.h +++ b/src/nlr/tests/Test_NetworkLevelReasoner.h @@ -1904,22 +1904,60 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - - void test_interval_arithmetic_bound_propagation_relu_constraints() + + void populateNetworkSBT( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; - populateNetwork( nlr ); + /* + 2 R 1 + x0 --- x2 ---> x4 --- x6 + \ / / + 1 \ / / + \/ -1 / + /\ / + 3 / \ / + / \ R / + x1 --- x3 ---> x5 + 1 + */ - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); - double large = 1000; + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 7 ); tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); @@ -1930,47 +1968,67 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + } + + void test_sbt_relus_all_active() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; nlr.setTableau( &tableau ); + populateNetworkSBT( nlr, tableau ); - // Initialize + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + 
tableau.setUpperBound( 1, 5 ); + + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + x0: [4, 6] + x1: [1, 5] - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Layer 1: - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + x2.lb = 2x0 + 3x1 : [11, 27] + x2.ub = 2x0 + 3x1 : [11, 27] - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + Both ReLUs active, bound survive through activations: - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + x4.lb = 2x0 + 3x1 : [11, 27] + x4.ub = 2x0 + 3x1 : [11, 27] + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = x0 + 2x1 : [6, 16] + x6.ub = x0 + 2x1 : [6, 16] + */ + + List expectedBounds( { + Tightening( 2, 11, Tightening::LB ), + Tightening( 2, 27, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 11, Tightening::LB ), + Tightening( 4, 27, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, 6, Tightening::LB ), + Tightening( 6, 16, Tightening::UB ), } ); List bounds; @@ -1979,294 +2037,247 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); for ( const auto &bound : expectedBounds ) TS_ASSERT( bounds.exists( bound ) ); + } - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + void test_sbt_relus_active_and_inactive() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + 
nlr.setTableau( &tableau ); + populateNetworkSBT( nlr, tableau ); - // Initialize + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); + + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + x0: [4, 6] + x1: [1, 5] - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Layer 1: - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + x2.lb = 2x0 + 3x1 - 30 : [-19, -3] + x2.ub = 2x0 + 3x1 - 30 : [-19, -3] - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + First ReLU is inactive, bounds get zeroed + Second ReLU is active, bounds surive the activation - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + x4.lb = 0 + x4.ub = 0 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 : [-11, -5] + */ + + List expectedBounds( { + Tightening( 2, -19, Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT_EQUALS( expectedBounds2.size(), bounds.size() ); - for ( const auto &bound : expectedBounds2 ) - TS_ASSERT( bounds.exists( bound ) ); + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : bounds ) + TS_ASSERT( expectedBounds.exists( bound ) ); } - void test_interval_arithmetic_bound_propagation_abs_constraints() + void test_sbt_relus_active_and_not_fixed() { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBT( nlr, tableau ); - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) 
- nlr.addLayerDependency( i - 1, i ); + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); + /* + Input ranges: - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); + x0: [4, 6] + x1: [1, 5] - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); + Layer 1: - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] - // Layer dependenices - nlr.addLayerDependency( 0, 1 ); - nlr.addLayerDependency( 1, 2 ); - nlr.addLayerDependency( 2, 3 ); - nlr.addLayerDependency( 3, 4 ); - nlr.addLayerDependency( 4, 5 ); + First ReLU is undecided, bound is concretized. + Coefficient: 12/(12--4) = 12/16 = 0.75 + Second ReLU is active, bounds surive the activation - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + x4 range: [0, 12] + x4.lb = 0.75( 2x0 + 3x1 ) - 0.75 * 15 = 1.5x0 + 2.25x1 - 11.25 + x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25 - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + Layer 2: - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + x6.lb = 0.5x0 + 1.25x1 - 11.25 + x6.ub = 0.5x0 + 1.25x1 - 8.25 - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + x6 range: [2 + 1.25 - 11.25 = -8, 3 + 6.25 - 8.25 = 1] = [-8, 1] + */ - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 2 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, 1, Tightening::UB ), + } ); - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large 
); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - nlr.setTableau( &tableau ); + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : bounds ) + TS_ASSERT( expectedBounds.exists( bound ) ); + } - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + void test_sbt_relus_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBT( nlr, tableau ); - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); - Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 8, Tightening::UB ), + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
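+        // For an unfixed ReLU over [l, u] with l < 0 < u, the symbolic
+        // bounds these tests expect form the triangle relaxation with slope
+        // lambda = u / ( u - l ):
+        //
+        //     relu( x ) <= lambda * ( x - l )   (chord through (l,0), (u,u))
+        //     relu( x ) >= lambda * x           (parallel line through 0)
+        //
+        // With [l, u] = [-4, 12] this is the 12/16 = 0.75 coefficient worked
+        // out in test_sbt_relus_active_and_not_fixed above. Here, though,
+        // the elimination below fixes the phase before any relaxation is
+        // needed.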
+ nlr.setBias( 1, 0, -15 ); - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + // However, one of the ReLU's variables has been eliminated + nlr.eliminateVariable( 2, -3 ); - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - Tightening( 10, -3, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + /* + Input ranges: - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 41, Tightening::UB ), - } ); + x0: [4, 6] + x1: [1, 5] - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : expectedBounds ) - TS_ASSERT( bounds.exists( bound ) ); + Layer 1: - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + First ReLU is inactive (set externally), bounds get zeroed + Second ReLU is active, bounds surive the activation - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + x4.lb = 0 + x4.ub = 0 - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), + Layer 2: - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 : [-11, -5] + */ - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), + List expectedBounds( { + // x2 does not appear, because it has been eliminated - Tightening( 10, -2, 
Tightening::LB ), Tightening( 10, 14, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT_EQUALS( expectedBounds2.size(), bounds.size() ); - for ( const auto &bound : expectedBounds2 ) - TS_ASSERT( bounds.exists( bound ) ); + + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : bounds ) + TS_ASSERT( expectedBounds.exists( bound ) ); } - void populateNetworkSBT( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + void test_sbt_abs_all_positive() { - /* - 2 R 1 - x0 --- x2 ---> x4 --- x6 - \ / / - 1 \ / / - \/ -1 / - /\ / - 3 / \ / - / \ R / - x1 --- x3 ---> x5 - 1 - */ + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); // Create the layers nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); // Mark layer dependencies @@ -2300,7 +2311,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Very loose bounds for neurons except inputs double large = 1000000; - tableau.getBoundManager().initialize( 7 ); tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); @@ -2311,16 +2321,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); - } - - void test_sbt_relus_all_active() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBT( nlr, tableau ); tableau.setLowerBound( 0, 4 ); tableau.setUpperBound( 0, 6 ); @@ -2345,7 +2345,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x3.lb = x0 + x1 : [5, 11] x3.ub = x0 + x1 : [5, 11] - Both ReLUs active, bound survive through activations: + Both absolute values positive, bound survive through activations: x4.lb = 2x0 + 3x1 : [11, 27] x4.ub = 2x0 + 3x1 : [11, 27] @@ -2378,18 +2378,66 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : expectedBounds ) - TS_ASSERT( bounds.exists( bound ) ); + for ( const auto &bound : bounds ) + TS_ASSERT( expectedBounds.exists( bound ) ); } - void test_sbt_relus_active_and_inactive() + void test_sbt_abs_positive_and_negative() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); NLR::NetworkLevelReasoner nlr; MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); nlr.setTableau( &tableau ); - populateNetworkSBT( nlr, 
tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); tableau.setLowerBound( 0, 4 ); tableau.setUpperBound( 0, 6 ); @@ -2417,19 +2465,19 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x3.lb = x0 + x1 : [5, 11] x3.ub = x0 + x1 : [5, 11] - First ReLU is inactive, bounds get zeroed - Second ReLU is active, bounds surive the activation + First absolute value is negative, bounds get flipped + Second absolute value is positive, bounds surive the activation - x4.lb = 0 - x4.ub = 0 + x4.lb = -2x0 -3x1 + 30 : [3, 19] + x4.ub = -2x0 -3x1 + 30 : [3, 19] x5.lb = x0 + x1 : [5, 11] x5.ub = x0 + x1 : [5, 11] Layer 2: - x6.lb = - x0 - x1 : [-11, -5] - x6.ub = - x0 - x1 : [-11, -5] + x6.lb = - 3x0 - 4x1 + 30 : [-8, 14] + x6.ub = - 3x0 - 4x1 + 30 : [-8, 14] */ List expectedBounds( { @@ -2438,120 +2486,88 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 3, 5, Tightening::LB ), Tightening( 3, 11, Tightening::UB ), - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 0, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 19, Tightening::UB ), Tightening( 5, 5, Tightening::LB ), Tightening( 5, 11, Tightening::UB ), - Tightening( 6, -11, Tightening::LB ), - Tightening( 6, -5, Tightening::UB ), + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, 14, Tightening::UB ), } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : bounds ) TS_ASSERT( expectedBounds.exists( bound ) ); } - void test_sbt_relus_active_and_not_fixed() + void test_sbt_absolute_values_positive_and_not_fixed() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); NLR::NetworkLevelReasoner nlr; MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); nlr.setTableau( &tableau ); - populateNetworkSBT( nlr, tableau ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong 
negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -15 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First ReLU is undecided, bound is concretized. - Coefficient: 12/(12--4) = 12/16 = 0.75 - Second ReLU is active, bounds surive the activation - - x4 range: [0, 12] - x4.lb = 0.75( 2x0 + 3x1 ) - 0.75 * 15 = 1.5x0 + 2.25x1 - 11.25 - x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - x6.lb = 0.5x0 + 1.25x1 - 11.25 - x6.ub = 0.5x0 + 1.25x1 - 8.25 + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); - x6 range: [2 + 1.25 - 11.25 = -8, 3 + 6.25 - 8.25 = 1] = [-8, 1] - */ + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); - List expectedBounds( { - Tightening( 2, -4, Tightening::LB ), - Tightening( 2, 12, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 12, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - Tightening( 6, -8, Tightening::LB ), - Tightening( 6, 1, Tightening::UB ), - } ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); - } + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - void test_sbt_relus_active_and_externally_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + // Very loose bounds for neurons except inputs + double large = 1000000; - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBT( nlr, tableau ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); tableau.setLowerBound( 0, 4 ); tableau.setUpperBound( 0, 6 ); tableau.setLowerBound( 1, 1 ); tableau.setUpperBound( 1, 5 ); - // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
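+        // For an unfixed absolute value over [l, u] with l < 0 < u, the
+        // expected symbolic bounds collapse to constants: 0 from below and
+        // max( -l, u ) from above. With [l, u] = [-4, 12] that gives
+        // x4 in [0, 12], and pushing the constant 12 through the output sum
+        // yields x6.ub = -x0 - x1 + 12, i.e. the overall range [-11, 7]
+        // asserted below.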
+ // Strong negative bias for x2, which is node (1,0) nlr.setBias( 1, 0, -15 ); - // However, one of the ReLU's variables has been eliminated - nlr.eliminateVariable( 2, -3 ); - // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); @@ -2570,34 +2586,37 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x3.lb = x0 + x1 : [5, 11] x3.ub = x0 + x1 : [5, 11] - First ReLU is inactive (set externally), bounds get zeroed + First absolute value is undecided, bounds are concretized. Second ReLU is active, bounds surive the activation + x4 range: [0, 12] x4.lb = 0 - x4.ub = 0 + x4.ub = 12 x5.lb = x0 + x1 : [5, 11] x5.ub = x0 + x1 : [5, 11] Layer 2: - x6.lb = - x0 - x1 : [-11, -5] - x6.ub = - x0 - x1 : [-11, -5] + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 + 12 : [ 1, 7] + + x6 range: [-11, 7] */ List expectedBounds( { - // x2 does not appear, because it has been eliminated - + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), Tightening( 3, 5, Tightening::LB ), Tightening( 3, 11, Tightening::UB ), Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 0, Tightening::UB ), + Tightening( 4, 12, Tightening::UB ), Tightening( 5, 5, Tightening::LB ), Tightening( 5, 11, Tightening::UB ), Tightening( 6, -11, Tightening::LB ), - Tightening( 6, -5, Tightening::UB ), + Tightening( 6, 7, Tightening::UB ), } ); List bounds; @@ -2608,7 +2627,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( expectedBounds.exists( bound ) ); } - void test_sbt_abs_all_positive() + void test_sbt_absolute_values_active_and_externally_fixed() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); @@ -2670,6 +2689,12 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 1, 1 ); tableau.setUpperBound( 1, 5 ); + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
+        nlr.setBias( 1, 0, -15 );
+
+        // However, the weighted sum variable has been eliminated
+        nlr.eliminateVariable( 2, -3 );
+
         // Invoke SBT
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
@@ -2682,531 +2707,461 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite

           Layer 1:

-            x2.lb = 2x0 + 3x1 : [11, 27]
-            x2.ub = 2x0 + 3x1 : [11, 27]
+            x2 is eliminated, everything set to -3

             x3.lb = x0 + x1 : [5, 11]
             x3.ub = x0 + x1 : [5, 11]

-          Both absolute values positive, bound survive through activations:
+          Second absolute value is positive, bounds survive the activation

-            x4.lb = 2x0 + 3x1 : [11, 27]
-            x4.ub = 2x0 + 3x1 : [11, 27]
+            x4: all set to 3

             x5.lb = x0 + x1 : [5, 11]
             x5.ub = x0 + x1 : [5, 11]

           Layer 2:

-            x6.lb = x0 + 2x1 : [6, 16]
-            x6.ub = x0 + 2x1 : [6, 16]
+            x6.lb = - x0 - x1 + 3 : [-8, -2]
+            x6.ub = - x0 - x1 + 3 : [-8, -2]
         */

         List<Tightening> expectedBounds( {
-            Tightening( 2, 11, Tightening::LB ),
-            Tightening( 2, 27, Tightening::UB ),
+            // x2 does not appear, because it has been eliminated
+
             Tightening( 3, 5, Tightening::LB ),
             Tightening( 3, 11, Tightening::UB ),

-            Tightening( 4, 11, Tightening::LB ),
-            Tightening( 4, 27, Tightening::UB ),
+            Tightening( 4, 3, Tightening::LB ),
+            Tightening( 4, 3, Tightening::UB ),
             Tightening( 5, 5, Tightening::LB ),
             Tightening( 5, 11, Tightening::UB ),

-            Tightening( 6, 6, Tightening::LB ),
-            Tightening( 6, 16, Tightening::UB ),
+            Tightening( 6, -8, Tightening::LB ),
+            Tightening( 6, -2, Tightening::UB ),
         } );

         List<Tightening> bounds;
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );

+        printf( "Dumping discovered bounds:\n" );
+        for ( const auto &bound : bounds )
+            bound.dump();
+
         TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() );
         for ( const auto &bound : bounds )
             TS_ASSERT( expectedBounds.exists( bound ) );
     }

-    void test_sbt_abs_positive_and_negative()
+    void test_generate_input_query()
     {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-
         NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        tableau.getBoundManager().initialize( 7 );
-        nlr.setTableau( &tableau );

         // Create the layers
         nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
-        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
-        nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 );
-        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );
+        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 );
+        nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 );
+        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 );
+        nlr.addLayer( 4, NLR::Layer::RELU, 2 );
+        nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 );

         // Mark layer dependencies
-        for ( unsigned i = 1; i <= 3; ++i )
+        for ( unsigned i = 1; i <= 5; ++i )
             nlr.addLayerDependency( i - 1, i );

-        // Weights
-        nlr.setWeight( 0, 0, 1, 0, 2 );
-        nlr.setWeight( 0, 0, 1, 1, 1 );
-        nlr.setWeight( 0, 1, 1, 0, 3 );
-        nlr.setWeight( 0, 1, 1, 1, 1 );
-        nlr.setWeight( 2, 0, 3, 0, 1 );
-        nlr.setWeight( 2, 1, 3, 0, -1 );
-
-        // Mark the ReLU sources
-        nlr.addActivationSource( 1, 0, 2, 0 );
-        nlr.addActivationSource( 1, 1, 2, 1 );
-
         // Variable indexing
+
         nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
         nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );

         nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
         nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 );

-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
-
-        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
+
nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - // Very loose bounds for neurons except inputs - double large = 1000000; + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -30 ); + // Set the weights and biases for the weighted sum layers - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); - /* - Input ranges: + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -5 ); - x0: [4, 6] - x1: [1, 5] + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); - Layer 1: + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 1, 1, 0 ); + nlr.setBias( 1, 2, 0 ); - x2.lb = 2x0 + 3x1 - 30 : [-19, -3] - x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + nlr.setBias( 3, 0, 0 ); + nlr.setBias( 3, 1, 2 ); - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] + nlr.setBias( 5, 0, 0 ); + nlr.setBias( 5, 1, 0 ); - First absolute value is negative, bounds get flipped - Second absolute value is positive, bounds surive the activation + // Mark the ReLU/Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); - x4.lb = -2x0 -3x1 + 30 : [3, 19] - x4.ub = -2x0 -3x1 + 30 : [3, 19] + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] + // Start the testing + Query ipq; + nlr.generateQuery( ipq ); + List unhandledEquations; + Set varsInUnhandledConstraints; + TS_ASSERT( + ipq.constructNetworkLevelReasoner( unhandledEquations, varsInUnhandledConstraints ) ); + NLR::NetworkLevelReasoner *reconstructedNlr = ipq.getNetworkLevelReasoner(); - Layer 2: + double input[2]; + double output[2]; - x6.lb = - 3x0 - 4x1 + 30 : [-8, 14] - x6.ub = - 3x0 - 4x1 + 30 : [-8, 14] - */ + input[0] = 1; + input[1] = 1; - List expectedBounds( { - Tightening( 2, -19, Tightening::LB ), - Tightening( 2, -3, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), + TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) ); - Tightening( 4, 3, Tightening::LB ), - Tightening( 4, 19, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), + 
TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); - Tightening( 6, -8, Tightening::LB ), - Tightening( 6, 14, Tightening::UB ), - } ); + input[0] = 1; + input[1] = 2; - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) ); - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); } - void test_sbt_absolute_values_positive_and_not_fixed() + void test_simulate_relu() { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + populateNetwork( nlr ); - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); + // With ReLUs, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + // With ReLUs, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 1 ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 1 ) ); + } - // Very loose bounds for neurons except inputs - double large = 1000000; + // With ReLUs, case 1 and 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 1 ) ); + simulations3.append( Vector( simulationSize, 2 ) ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 
2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 0 ) ); + } + } - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -15 ); + void test_simulate_sigmoids() + { + NLR::NetworkLevelReasoner nlr; - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + populateNetworkWithSigmoids( nlr ); - /* - Input ranges: + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - x0: [4, 6] - x1: [1, 5] + // case 1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); - Layer 1: + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.6750, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 3.0167, + 0.0001 ) ); + } - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] + // case 2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 1 ) ); - First absolute value is undecided, bounds are concretized. 
- Second ReLU is active, bounds surive the activation + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - x4 range: [0, 12] - x4.lb = 0 - x4.ub = 12 + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.6032, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.5790, + 0.0001 ) ); + } - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] + // case 3 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 1 ) ); + simulations3.append( Vector( simulationSize, 2 ) ); - Layer 2: + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - x6.lb = - x0 - x1 : [-11, -5] - x6.ub = - x0 - x1 + 12 : [ 1, 7] + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.5045, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.1957, + 0.0001 ) ); + } + } + + void test_simulate_round() + { + NLR::NetworkLevelReasoner nlr; - x6 range: [-11, 7] - */ + populateNetworkWithRound( nlr ); - List expectedBounds( { - Tightening( 2, -4, Tightening::LB ), - Tightening( 2, 12, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 12, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), + // With Round, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); - Tightening( 6, -11, Tightening::LB ), - Tightening( 6, 7, Tightening::UB ), - } ); + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); - } + // With Round, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 2.1 ) ); + simulations2.append( Vector( simulationSize, 1.4 ) ); - void test_sbt_absolute_values_active_and_externally_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 2, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -4, + 0.0001 ) 
); + } - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + // With Round, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 2.1 ) ); + simulations3.append( Vector( simulationSize, 1.6 ) ); - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -12, + 0.0001 ) ); + } + } + + void test_simulate_sign() + { + NLR::NetworkLevelReasoner nlr; - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); + populateNetworkWithSign( nlr ); - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + // With Sign, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
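[Editor's note: to see why a bias of -15 un-fixes the node: with x0 in [4, 6] and x1 in [1, 5], the pre-activation x2 = 2*x0 + 3*x1 + b ranges over [11 + b, 27 + b], so b = 0 leaves the node fixed in the positive phase, while b = -15 yields [-4, 12], which straddles zero. A small self-contained sketch of this phase check — `phase` is a hypothetical helper introduced only for the illustration, not Marabou code:]

#include <cstdio>
#include <initializer_list>

// Phase of an absolute-value node, classified from its pre-activation bounds
const char *phase( double lb, double ub )
{
    if ( lb >= 0 )
        return "positive phase: bounds survive the activation";
    if ( ub <= 0 )
        return "negative phase: bounds are negated";
    return "undecided: bounds must be concretized";
}

int main()
{
    // x2 = 2*x0 + 3*x1 + bias, with x0 in [4, 6] and x1 in [1, 5]
    for ( double bias : { 0.0, -15.0 } )
    {
        double lb = 2 * 4 + 3 * 1 + bias;
        double ub = 2 * 6 + 3 * 5 + bias;
        printf( "bias = %5.1f: x2 in [%g, %g] -> %s\n", bias, lb, ub, phase( lb, ub ) );
    }
    return 0;
}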
- nlr.setBias( 1, 0, -15 ); - - // However, the weighted sum variable has been eliminated - nlr.eliminateVariable( 2, -3 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2 is eliminated, everything set to -3 - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - Second absolute value is positive, bounds surive the activation - - x4: all set to 3 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - x0 - x1 + 3 : [-8, -2] - x6.ub = - x0 - x1 + 3 : [-8, -2] - */ - - List expectedBounds( { - // x2 does not appear, because it has been eliminated - - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 3, Tightening::LB ), - Tightening( 4, 3, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -8, Tightening::LB ), - Tightening( 6, -2, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - printf( "Dumpign discovered bounds:\n" ); - for ( const auto &bound : bounds ) - bound.dump(); - - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); - } - - void test_generate_input_query() - { - NLR::NetworkLevelReasoner nlr; - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Variable indexing - - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - - // Set the weights and biases for the weighted sum layers - - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -5 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 1, 1, 0 ); - nlr.setBias( 1, 2, 0 ); - - nlr.setBias( 3, 0, 0 ); - nlr.setBias( 3, 1, 2 ); - - nlr.setBias( 5, 0, 0 ); - nlr.setBias( 5, 1, 0 ); - - // Mark the ReLU/Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - 
nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Start the testing - Query ipq; - nlr.generateQuery( ipq ); - List unhandledEquations; - Set varsInUnhandledConstraints; - TS_ASSERT( - ipq.constructNetworkLevelReasoner( unhandledEquations, varsInUnhandledConstraints ) ); - NLR::NetworkLevelReasoner *reconstructedNlr = ipq.getNetworkLevelReasoner(); - - double input[2]; - double output[2]; - - input[0] = 1; - input[1] = 1; + // With Sign, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, -2 ) ); + simulations2.append( Vector( simulationSize, -2 ) ); - TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) ); + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } - input[0] = 1; - input[1] = 2; + // With Sign, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, -1 ) ); + simulations3.append( Vector( simulationSize, 1 ) ); - TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) ); + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -4 ) ); + } } - - void test_simulate_relu() + + void test_simulate_abs() { NLR::NetworkLevelReasoner nlr; - populateNetwork( nlr ); + populateNetworkWithAbs( nlr ); unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - // With ReLUs, Inputs are zeros, only biases count + // With Abs, Inputs are zeros, only biases count Vector> simulations1; simulations1.append( Vector( simulationSize, 0 ) ); simulations1.append( Vector( simulationSize, 0 ) ); @@ -3227,10 +3182,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 4 ) ); } - // With ReLUs, case 1 + // With Abs, case 1 Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, -2 ) ); + simulations2.append( Vector( simulationSize, -2 ) ); TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); @@ -3245,10 +3200,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 1 ) .get( i ), - 1 ) ); + 4 ) ); } - // With ReLUs, case 1 and 2 + // With Abs, case 2 Vector> simulations3; simulations3.append( Vector( simulationSize, 1 ) ); simulations3.append( Vector( simulationSize, 2 ) ); @@ -3261,24 +3216,24 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 0 ) ); + 4 ) ); TS_ASSERT( FloatUtils::areEqual( ( *( 
nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 1 ) .get( i ), - 0 ) ); + 10 ) ); } } - - void test_simulate_sigmoids() + + void test_simulate_leaky_relu() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithSigmoids( nlr ); + populateNetworkWithLeakyRelu( nlr ); unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - // case 1 + // With Leaky ReLU, Inputs are zeros, only biases count Vector> simulations1; simulations1.append( Vector( simulationSize, 0 ) ); simulations1.append( Vector( simulationSize, 0 ) ); @@ -3291,17 +3246,15 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 0.6750, - 0.0001 ) ); + 1 ) ); TS_ASSERT( FloatUtils::areEqual( ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 1 ) .get( i ), - 3.0167, - 0.0001 ) ); + 4 ) ); } - // case 2 + // With Leaky ReLU, case 1 (alpha=0.1) Vector> simulations2; simulations2.append( Vector( simulationSize, 1 ) ); simulations2.append( Vector( simulationSize, 1 ) ); @@ -3314,17 +3267,17 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 0.6032, + 0.9, 0.0001 ) ); TS_ASSERT( FloatUtils::areEqual( ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 1 ) .get( i ), - 2.5790, + 0.57, 0.0001 ) ); } - // case 3 + // With Leaky ReLU, case 2 Vector> simulations3; simulations3.append( Vector( simulationSize, 1 ) ); simulations3.append( Vector( simulationSize, 2 ) ); @@ -3337,26 +3290,26 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 0.5045, + -0.04, 0.0001 ) ); TS_ASSERT( FloatUtils::areEqual( ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 1 ) .get( i ), - 2.1957, + -0.76, 0.0001 ) ); } } - void test_simulate_round() + void test_simulate_max() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithRound( nlr ); + populateNetworkWithMax( nlr ); unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - // With Round, Inputs are zeros, only biases count + // With Max, Inputs are zeros, only biases count Vector> simulations1; simulations1.append( Vector( simulationSize, 0 ) ); simulations1.append( Vector( simulationSize, 0 ) ); @@ -3369,18 +3322,13 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); + -3 ) ); } - // With Round, case 1 + // With Max, case 1 Vector> simulations2; - simulations2.append( Vector( simulationSize, 2.1 ) ); - simulations2.append( Vector( simulationSize, 1.4 ) ); + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, -3 ) ); TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); @@ -3390,20 +3338,13 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 2, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -4, - 0.0001 ) ); + -18 ) ); } - 
// With Round, case 2 + // With Max, case 2 Vector> simulations3; - simulations3.append( Vector( simulationSize, 2.1 ) ); - simulations3.append( Vector( simulationSize, 1.6 ) ); + simulations3.append( Vector( simulationSize, -3 ) ); + simulations3.append( Vector( simulationSize, 3 ) ); TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); @@ -3413,26 +3354,19 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 0, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -12, - 0.0001 ) ); + -5 ) ); } } - void test_simulate_sign() + void test_simulate_softmax() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithSign( nlr ); + populateNetworkWithSoftmax( nlr ); unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - // With Sign, Inputs are zeros, only biases count + // With Softmax, Inputs are zeros, only biases count Vector> simulations1; simulations1.append( Vector( simulationSize, 0 ) ); simulations1.append( Vector( simulationSize, 0 ) ); @@ -3445,18 +3379,20 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 1 ) ); + 0.2999, + 0.0001 ) ); TS_ASSERT( FloatUtils::areEqual( ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 1 ) .get( i ), - 4 ) ); + 2.4001, + 0.0001 ) ); } - // With Sign, case 1 + // With Softmax, case 1 Vector> simulations2; - simulations2.append( Vector( simulationSize, -2 ) ); - simulations2.append( Vector( simulationSize, -2 ) ); + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, -3 ) ); TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); @@ -3466,18 +3402,20 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 1 ) ); + 0.1192, + 0.0001 ) ); TS_ASSERT( FloatUtils::areEqual( ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 1 ) .get( i ), - 4 ) ); + 2.7615, + 0.0001 ) ); } - // With Sign, case 2 + // With Softmax, case 2 Vector> simulations3; - simulations3.append( Vector( simulationSize, -1 ) ); - simulations3.append( Vector( simulationSize, 1 ) ); + simulations3.append( Vector( simulationSize, -3 ) ); + simulations3.append( Vector( simulationSize, 3 ) ); TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); @@ -3487,24 +3425,26 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - -1 ) ); + 0.1206, + 0.0001 ) ); TS_ASSERT( FloatUtils::areEqual( ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 1 ) .get( i ), - -4 ) ); + 2.7588, + 0.0001 ) ); } } - void test_simulate_abs() + void test_simulate_bilinear() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithAbs( nlr ); + populateNetworkWithBilinear( nlr ); unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - // With Abs, Inputs are zeros, only biases count + // With Bilinear, Inputs are zeros, only biases count Vector> simulations1; simulations1.append( Vector( simulationSize, 0 ) ); simulations1.append( Vector( simulationSize, 0 ) ); @@ -3517,18 +3457,13 @@ class NetworkLevelReasonerTestSuite : 
public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); + 0 ) ); } - // With Abs, case 1 + // With Bilinear, case 1 Vector> simulations2; - simulations2.append( Vector( simulationSize, -2 ) ); - simulations2.append( Vector( simulationSize, -2 ) ); + simulations2.append( Vector( simulationSize, 0.1 ) ); + simulations2.append( Vector( simulationSize, -0.3 ) ); TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); @@ -3538,18 +3473,14 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); + 2.8304, + 0.0001 ) ); } - // With Abs, case 2 + // With Bilinear, case 2 Vector> simulations3; - simulations3.append( Vector( simulationSize, 1 ) ); - simulations3.append( Vector( simulationSize, 2 ) ); + simulations3.append( Vector( simulationSize, -0.3 ) ); + simulations3.append( Vector( simulationSize, 0.3 ) ); TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); @@ -3559,50 +3490,100 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 4 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 10 ) ); + 0.0912, + 0.0001 ) ); } } - - void test_simulate_leaky_relu() + + void test_simulate_non_consecutive_layers() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithLeakyRelu( nlr ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + nlr.addLayerDependency( 0, 1 ); + nlr.addLayerDependency( 1, 2 ); + nlr.addLayerDependency( 2, 3 ); + nlr.addLayerDependency( 0, 3 ); + nlr.addLayerDependency( 3, 4 ); + nlr.addLayerDependency( 0, 4 ); + nlr.addLayerDependency( 4, 5 ); + + // Set the weights and relus + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, 2 ); + nlr.setWeight( 2, 2, 3, 1, -2 ); + nlr.setWeight( 0, 1, 3, 1, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 0, 0, 4, 2 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 2, 5, 0, 1 ); unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - // With Leaky ReLU, Inputs are zeros, only biases count + // Simulate1 Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 1 ) ); + simulations1.append( Vector( simulationSize, 1 ) ); TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); for ( 
unsigned i = 0; i < simulationSize; ++i ) - { TS_ASSERT( FloatUtils::areEqual( ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 1 ) ); + 2 ) ); + + // Simulate2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, -1 ) ); + simulations2.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) TS_ASSERT( FloatUtils::areEqual( ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) + .get( 0 ) .get( i ), - 4 ) ); - } + 0 ) ); + } - // With Leaky ReLU, case 1 (alpha=0.1) - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 1 ) ); + void test_simulate_abs_and_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithAbsAndRelu( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + // Simulate1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 1 ) ); + simulations1.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); for ( unsigned i = 0; i < simulationSize; ++i ) { @@ -3610,22 +3591,20 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 0.9, - 0.0001 ) ); + 2 ) ); TS_ASSERT( FloatUtils::areEqual( ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 1 ) .get( i ), - 0.57, - 0.0001 ) ); + 2 ) ); } - // With Leaky ReLU, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, 1 ) ); - simulations3.append( Vector( simulationSize, 2 ) ); + // Simulate2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 2 ) ); - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); for ( unsigned i = 0; i < simulationSize; ++i ) { @@ -3633,29 +3612,27 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - -0.04, - 0.0001 ) ); + 4 ) ); TS_ASSERT( FloatUtils::areEqual( ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 1 ) .get( i ), - -0.76, - 0.0001 ) ); + 4 ) ); } } - void test_simulate_max() + void test_simulate_round_and_sign() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithMax( nlr ); + populateNetworkWithRoundAndSign( nlr ); unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - // With Max, Inputs are zeros, only biases count + // With Round/Sign, case 1 Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 1.6 ) ); + simulations1.append( Vector( simulationSize, 1.4 ) ); TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); @@ -3665,13 +3642,18 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - -3 ) ); + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -2 ) ); } - // With Max, case 1 + // With 
Round/Sign, case 2 Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, -3 ) ); + simulations2.append( Vector( simulationSize, 1.6 ) ); + simulations2.append( Vector( simulationSize, 1.6 ) ); TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); @@ -3681,38 +3663,27 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - -18 ) ); - } - - // With Max, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, -3 ) ); - simulations3.append( Vector( simulationSize, 3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { + -1 ) ); TS_ASSERT( FloatUtils::areEqual( ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) + .get( 1 ) .get( i ), - -5 ) ); + -4 ) ); } } - void test_simulate_softmax() + void test_simulate_leaky_relu_and_sigmoid() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithSoftmax( nlr ); + populateNetworkWithLeakyReluAndSigmoid( nlr ); unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - // With Softmax, Inputs are zeros, only biases count + // With LeakyReLU/Sigmoid, case 1 Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 1 ) ); + simulations1.append( Vector( simulationSize, 1 ) ); TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); @@ -3722,20 +3693,20 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 0.2999, + 0.7109, 0.0001 ) ); TS_ASSERT( FloatUtils::areEqual( ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 1 ) .get( i ), - 2.4001, + 1.4602, 0.0001 ) ); } - // With Softmax, case 1 + // With LeakyReLU/Sigmoid, case 2 Vector> simulations2; simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, -3 ) ); + simulations2.append( Vector( simulationSize, 2 ) ); TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); @@ -3745,52 +3716,29 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 0.1192, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2.7615, - 0.0001 ) ); - } - - // With Softmax, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, -3 ) ); - simulations3.append( Vector( simulationSize, 3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.1206, + 0.4013, 0.0001 ) ); TS_ASSERT( FloatUtils::areEqual( ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 1 ) .get( i ), - 2.7588, + 0.6508, 0.0001 ) ); } } - void test_simulate_bilinear() + void test_simulate_softmax_and_max() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithBilinear( nlr ); + populateNetworkWithSoftmaxAndMax( nlr ); unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - // 
With Bilinear, Inputs are zeros, only biases count
+ // With Softmax/Max, case 1
Vector> simulations1;
- simulations1.append( Vector( simulationSize, 0 ) );
- simulations1.append( Vector( simulationSize, 0 ) );
+ simulations1.append( Vector( simulationSize, 1 ) );
+ simulations1.append( Vector( simulationSize, -3 ) );

TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) );

@@ -3800,13 +3748,14 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
.get( 0 )
.get( i ),
- 0 ) );
+ -2.9998,
+ 0.0001 ) );
}

- // With Bilinear, case 1
+ // With Softmax/Max, case 2
Vector> simulations2;
- simulations2.append( Vector( simulationSize, 0.1 ) );
- simulations2.append( Vector( simulationSize, -0.3 ) );
+ simulations2.append( Vector( simulationSize, -3 ) );
+ simulations2.append( Vector( simulationSize, 3 ) );

TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) );

@@ -3816,112 +3765,20 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
.get( 0 )
.get( i ),
- 2.8304,
- 0.0001 ) );
- }
-
- // With Bilinear, case 2
- Vector> simulations3;
- simulations3.append( Vector( simulationSize, -0.3 ) );
- simulations3.append( Vector( simulationSize, 0.3 ) );
-
- TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) );
-
- for ( unsigned i = 0; i < simulationSize; ++i )
- {
- TS_ASSERT( FloatUtils::areEqual(
- ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
- .get( 0 )
- .get( i ),
- 0.0912,
+ -1,
0.0001 ) );
}
}

- void test_simulate_non_consecutive_layers()
+ void test_simulate_relu_and_bilinear()
{
NLR::NetworkLevelReasoner nlr;

- // Create the layers
- nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
- nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 );
- nlr.addLayer( 2, NLR::Layer::RELU, 3 );
- nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 );
- nlr.addLayer( 4, NLR::Layer::RELU, 3 );
- nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 );
-
- // Mark layer dependencies
- nlr.addLayerDependency( 0, 1 );
- nlr.addLayerDependency( 1, 2 );
- nlr.addLayerDependency( 2, 3 );
- nlr.addLayerDependency( 0, 3 );
- nlr.addLayerDependency( 3, 4 );
- nlr.addLayerDependency( 0, 4 );
- nlr.addLayerDependency( 4, 5 );
-
- // Set the weights and relus
- nlr.setWeight( 0, 0, 1, 0, 1 );
- nlr.setWeight( 0, 0, 1, 1, 2 );
- nlr.setWeight( 0, 1, 1, 1, -3 );
- nlr.setWeight( 0, 1, 1, 2, 1 );
-
- nlr.addActivationSource( 1, 0, 2, 0 );
- nlr.addActivationSource( 1, 1, 2, 1 );
- nlr.addActivationSource( 1, 2, 2, 2 );
-
- nlr.setWeight( 2, 0, 3, 0, 1 );
- nlr.setWeight( 2, 1, 3, 0, 2 );
- nlr.setWeight( 2, 2, 3, 1, -2 );
- nlr.setWeight( 0, 1, 3, 1, 1 );
-
- nlr.addActivationSource( 3, 0, 4, 0 );
- nlr.addActivationSource( 3, 1, 4, 1 );
- nlr.addActivationSource( 0, 0, 4, 2 );
-
- nlr.setWeight( 4, 0, 5, 0, 1 );
- nlr.setWeight( 4, 1, 5, 0, 1 );
- nlr.setWeight( 4, 2, 5, 0, 1 );
-
- unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS );
-
- // Simulate1
- Vector> simulations1;
- simulations1.append( Vector( simulationSize, 1 ) );
- simulations1.append( Vector( simulationSize, 1 ) );
-
- TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) );
-
- for ( unsigned i = 0; i < simulationSize; ++i )
- TS_ASSERT( FloatUtils::areEqual(
- ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
- .get( 0 )
- .get( i ),
- 2 ) );
-
- // Simulate2
- Vector> simulations2;
- simulations2.append( Vector( 
simulationSize, -1 ) ); - simulations2.append( Vector( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0 ) ); - } + populateNetworkWithReluAndBilinear( nlr ); - void test_simulate_abs_and_relu() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithAbsAndRelu( nlr ); - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - // Simulate1 + // With Relu/Bilinear, case 1 Vector> simulations1; simulations1.append( Vector( simulationSize, 1 ) ); simulations1.append( Vector( simulationSize, 1 ) ); @@ -3934,15 +3791,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 2 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2 ) ); + 1 ) ); } - // Simulate2 + // With ReLU/Bilinear, case 2 Vector> simulations2; simulations2.append( Vector( simulationSize, 1 ) ); simulations2.append( Vector( simulationSize, 2 ) ); @@ -3955,203 +3807,1883 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 4 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); + 0 ) ); } } - void test_simulate_round_and_sign() + void test_interval_arithmetic_bound_propagation_relu_constraints() { NLR::NetworkLevelReasoner nlr; + populateNetwork( nlr ); - populateNetworkWithRoundAndSign( nlr ); + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); - // With Round/Sign, case 1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 1.6 ) ); - simulations1.append( Vector( simulationSize, 1.4 ) ); + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + nlr.setTableau( &tableau ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( 
FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -2 ) ); - } + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, 
Tightening::LB ), Tightening( 13, 28, Tightening::UB ),
+ } );
+
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+ }
+
+ void test_interval_arithmetic_bound_propagation_abs_constraints()
+ {
+ NLR::NetworkLevelReasoner nlr;
+
+ // Create the layers
+ nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+ nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 );
+ nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 );
+ nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 );
+ nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 );
+ nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 );
+
+ // Mark layer dependencies
+ for ( unsigned i = 1; i <= 5; ++i )
+ nlr.addLayerDependency( i - 1, i );
+
+ // Set the weights and biases for the weighted sum layers
+ nlr.setWeight( 0, 0, 1, 0, 1 );
+ nlr.setWeight( 0, 0, 1, 1, 2 );
+ nlr.setWeight( 0, 1, 1, 1, -3 );
+ nlr.setWeight( 0, 1, 1, 2, 1 );
+
+ nlr.setWeight( 2, 0, 3, 0, 1 );
+ nlr.setWeight( 2, 0, 3, 1, -1 );
+ nlr.setWeight( 2, 1, 3, 0, 1 );
+ nlr.setWeight( 2, 1, 3, 1, 1 );
+ nlr.setWeight( 2, 2, 3, 0, -1 );
+ nlr.setWeight( 2, 2, 3, 1, -1 );
+
+ nlr.setWeight( 4, 0, 5, 0, 1 );
+ nlr.setWeight( 4, 0, 5, 1, 1 );
+ nlr.setWeight( 4, 1, 5, 1, 3 );
+
+ nlr.setBias( 1, 0, 1 );
+ nlr.setBias( 3, 1, 2 );
+
+ // Mark the Abs sources
+ nlr.addActivationSource( 1, 0, 2, 0 );
+ nlr.addActivationSource( 1, 1, 2, 1 );
+ nlr.addActivationSource( 1, 2, 2, 2 );
+
+ nlr.addActivationSource( 3, 0, 4, 0 );
+ nlr.addActivationSource( 3, 1, 4, 1 );
+
+ // Layer dependencies
+ nlr.addLayerDependency( 0, 1 );
+ nlr.addLayerDependency( 1, 2 );
+ nlr.addLayerDependency( 2, 3 );
+ nlr.addLayerDependency( 3, 4 );
+ nlr.addLayerDependency( 4, 5 );
+
+ // Variable indexing
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 );
+
+ MockTableau tableau;
+ tableau.getBoundManager().initialize( 14 );
+
+ // Initialize the bounds
+ tableau.setLowerBound( 0, -1 );
+ tableau.setUpperBound( 0, 2 );
+ tableau.setLowerBound( 1, -1 );
+ tableau.setUpperBound( 1, 2 );
+
+ double large = 1000;
+ tableau.setLowerBound( 2, -large );
+ tableau.setUpperBound( 2, large );
+ tableau.setLowerBound( 3, -large );
+ tableau.setUpperBound( 3, large );
+ tableau.setLowerBound( 4, -large );
+ tableau.setUpperBound( 4, large );
+ tableau.setLowerBound( 5, -large );
+ tableau.setUpperBound( 5, large );
+ tableau.setLowerBound( 6, -large );
+ tableau.setUpperBound( 6, large );
+ tableau.setLowerBound( 7, -large );
+ tableau.setUpperBound( 7, large );
+ tableau.setLowerBound( 8, -large );
+ tableau.setUpperBound( 8, large );
+ tableau.setLowerBound( 9, -large );
+ tableau.setUpperBound( 9, large );
+ tableau.setLowerBound( 10, -large );
+ tableau.setUpperBound( 10, large );
+ tableau.setLowerBound( 11, -large );
+ tableau.setUpperBound( 11, 
large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 8, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), + + Tightening( 10, -3, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 41, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + + Tightening( 12, 0, 
Tightening::LB ), Tightening( 12, 14, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_sign_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithSign( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 1, Tightening::LB ), Tightening( 3, 1, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 3, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + + Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, 3 ); + tableau.setUpperBound( 0, 4 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + 
tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, 4, Tightening::LB ), Tightening( 2, 5, Tightening::UB ), + Tightening( 3, 1, Tightening::LB ), Tightening( 3, 1, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 11, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, 1, Tightening::LB ), Tightening( 10, 3, Tightening::UB ), + Tightening( 11, 1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + + Tightening( 12, 1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, 4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_leaky_relu_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithLeakyRelu( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), + Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, 
-0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), + Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ), + Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), + + Tightening( 9, -0.28, Tightening::LB ), Tightening( 9, 10.1, Tightening::UB ), + Tightening( 11, -0.38, Tightening::LB ), Tightening( 11, 9.1, Tightening::UB ), + + Tightening( 12, -0.28, Tightening::LB ), Tightening( 12, 10.1, Tightening::UB ), + Tightening( 13, -1.42, Tightening::LB ), Tightening( 13, 37.4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, -0.2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 7.1, Tightening::UB ), + Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), + + Tightening( 9, -0.34, Tightening::LB ), Tightening( 9, 7.1, Tightening::UB ), + Tightening( 11, -0.32, Tightening::LB ), Tightening( 11, 7.3, Tightening::UB ), + + Tightening( 12, -0.34, Tightening::LB ), Tightening( 12, 7.1, Tightening::UB ), + Tightening( 13, -1.3, Tightening::LB ), Tightening( 13, 29, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_round_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithRound( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, 1.4 ); + tableau.setUpperBound( 0, 1.6 ); + tableau.setLowerBound( 1, -1.4 ); + tableau.setUpperBound( 1, 2.1 ); + + double 
large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List<Tightening> expectedBounds( { + Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), + Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), + Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + + Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), + Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), + + Tightening( 9, -4, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), + Tightening( 11, -7, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), + Tightening( 13, -25, Tightening::LB ), Tightening( 13, 35, Tightening::UB ), + } ); + + List<Tightening> bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3.1 ); + tableau.setUpperBound( 0, 1.6 ); + tableau.setLowerBound( 1, 1.4 ); + tableau.setUpperBound( 1, 2.1 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List<Tightening> expectedBounds2( { + Tightening( 2, 
-2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), + Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), + Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), + Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -16, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + + Tightening( 9, -16, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 11, -15, Tightening::LB ), Tightening( 11, 2, Tightening::UB ), + + Tightening( 12, -16, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -61, Tightening::LB ), Tightening( 13, 7, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_sigmoid_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithSigmoids( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List<Tightening> expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 0.5000, Tightening::LB ), Tightening( 3, 0.8808, Tightening::UB ), + Tightening( 5, 0.0067, Tightening::LB ), Tightening( 5, 0.9933, Tightening::UB ), + Tightening( 7, 0.2689, Tightening::LB ), Tightening( 7, 0.7311, Tightening::UB ), + + Tightening( 8, -0.2244, Tightening::LB ), Tightening( 8, 1.6052, Tightening::UB ), + Tightening( 10, 0.3948, Tightening::LB ), Tightening( 10, 2.2244, Tightening::UB ), + + Tightening( 9, 0.4441, Tightening::LB ), Tightening( 9, 0.8327, Tightening::UB ), + Tightening( 11, 0.5974, Tightening::LB ), Tightening( 11, 0.9024, Tightening::UB ), + + Tightening( 12, 0.4441, Tightening::LB ), Tightening( 12, 0.8327, Tightening::UB ), + Tightening( 13, 2.2364, 
Tightening::LB ), Tightening( 13, 3.5399, Tightening::UB ), + } ); + + List<Tightening> bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List<Tightening> expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.1192, Tightening::LB ), Tightening( 3, 0.8808, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9933, Tightening::UB ), + Tightening( 7, 0.2689, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), + + Tightening( 8, -0.7616, Tightening::LB ), Tightening( 8, 1.6052, Tightening::UB ), + Tightening( 10, 0.2384, Tightening::LB ), Tightening( 10, 2.6052, Tightening::UB ), + + Tightening( 9, 0.3183, Tightening::LB ), Tightening( 9, 0.8327, Tightening::UB ), + Tightening( 11, 0.5593, Tightening::LB ), Tightening( 11, 0.9312, Tightening::UB ), + + Tightening( 12, 0.3183, Tightening::LB ), Tightening( 12, 0.8327, Tightening::UB ), + Tightening( 13, 1.9963, Tightening::LB ), Tightening( 13, 3.6263, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_max_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithMax( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 12 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + 
tableau.setUpperBound( 8, large ); +
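// A note on the expectations computed below (a sketch, assuming the Max neurons are wired to the sources set up by populateNetworkWithMax): interval arithmetic propagates f = max( b1, b2 ) as [ max( lb1, lb2 ), max( ub1, ub2 ) ], + // so a Max over x2 in [0, 2] and x3 in [-5, 5] yields [0, 5], matching Tightening( 6, 0, LB ) / Tightening( 6, 5, UB ) below. +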
tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List<Tightening> expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 10, Tightening::UB ), + Tightening( 9, -8, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), + + Tightening( 11, -10, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + } ); + + List<Tightening> bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List<Tightening> expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -8, Tightening::LB ), Tightening( 3, 9, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 9, Tightening::UB ), + Tightening( 7, -5, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 16, Tightening::UB ), + Tightening( 9, -14, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -5, Tightening::LB ), Tightening( 10, 16, Tightening::UB ), + + Tightening( 11, -16, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_softmax_constraints() + { + NLR::NetworkLevelReasoner nlr; + 
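// The expectations below follow the closed-form interval bounds for softmax (a sketch of the standard relaxation): for inputs with intervals [l_j, u_j], lb_i = e^l_i / ( e^l_i + sum_{j != i} e^u_j ) and ub_i = e^u_i / ( e^u_i + sum_{j != i} e^l_j ). + // With x2 in [0, 2], x4 in [-5, 5] and x6 in [-1, 1] this gives lb( x3 ) = e^0 / ( e^0 + e^5 + e^1 ) ~= 0.0066 and ub( x3 ) = e^2 / ( e^2 + e^-5 + e^-1 ) ~= 0.9517, as asserted below. +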
populateNetworkWithSoftmax( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List<Tightening> expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 0.0066, Tightening::LB ), Tightening( 3, 0.9517, Tightening::UB ), + Tightening( 5, 0.0007, Tightening::LB ), Tightening( 5, 0.9909, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ), + + Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ), + Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ), + + Tightening( 9, 0.0240, Tightening::LB ), Tightening( 9, 0.8349, Tightening::UB ), + Tightening( 11, 0.1651, Tightening::LB ), Tightening( 11, 0.9759, Tightening::UB ), + + Tightening( 12, 0.0240, Tightening::LB ), Tightening( 12, 0.8349, Tightening::UB ), + Tightening( 13, 0.5192, Tightening::LB ), Tightening( 13, 3.7629, Tightening::UB ), + } ); + + List<Tightening> bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + 
tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List<Tightening> expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + + Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 10, 0.0654, Tightening::LB ), Tightening( 10, 2.9933, Tightening::UB ), + + Tightening( 9, 0.0184, Tightening::LB ), Tightening( 9, 0.8678, Tightening::UB ), + Tightening( 11, 0.1322, Tightening::LB ), Tightening( 11, 0.9816, Tightening::UB ), + + Tightening( 12, 0.0184, Tightening::LB ), Tightening( 12, 0.8678, Tightening::UB ), + Tightening( 13, 0.4151, Tightening::LB ), Tightening( 13, 3.8125, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_bilinear_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithBilinear( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 12 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -0.1 ); + tableau.setUpperBound( 0, 0.1 ); + tableau.setLowerBound( 1, -0.1 ); + tableau.setUpperBound( 1, 0.1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List<Tightening> expectedBounds( { + Tightening( 2, 0.9, Tightening::LB ), Tightening( 2, 1.1, Tightening::UB ), + Tightening( 3, -0.5, Tightening::LB ), Tightening( 3, 0.5, Tightening::UB ), + Tightening( 4, -0.3, Tightening::LB ), Tightening( 4, 0.3, Tightening::UB ), + Tightening( 5, -0.3, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ), + + Tightening( 6, -0.55, Tightening::LB ), Tightening( 6, 0.55, Tightening::UB ), + Tightening( 7, -0.09, Tightening::LB ), Tightening( 7, 0.09, Tightening::UB ), + + Tightening( 8, 1.36, Tightening::LB ), Tightening( 8, 2.64, Tightening::UB ), + Tightening( 9, -0.64, Tightening::LB ), Tightening( 9, 0.64, Tightening::UB ), + + Tightening( 10, -1.6896, Tightening::LB 
), Tightening( 10, 1.6896, Tightening::UB ), + + Tightening( 11, -1.6896, Tightening::LB ), Tightening( 11, 1.6896, Tightening::UB ), + } ); + + List<Tightening> bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -0.3 ); + tableau.setUpperBound( 0, 0.1 ); + tableau.setLowerBound( 1, -0.1 ); + tableau.setUpperBound( 1, 0.2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List<Tightening> expectedBounds2( { + Tightening( 2, 0.7, Tightening::LB ), Tightening( 2, 1.1, Tightening::UB ), + Tightening( 3, -0.8, Tightening::LB ), Tightening( 3, 0.9, Tightening::UB ), + Tightening( 4, -0.5, Tightening::LB ), Tightening( 4, 0.5, Tightening::UB ), + Tightening( 5, -0.6, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ), + + Tightening( 6, -0.88, Tightening::LB ), Tightening( 6, 0.99, Tightening::UB ), + Tightening( 7, -0.3, Tightening::LB ), Tightening( 7, 0.3, Tightening::UB ), + + Tightening( 8, 0.82, Tightening::LB ), Tightening( 8, 3.29, Tightening::UB ), + Tightening( 9, -1.29, Tightening::LB ), Tightening( 9, 1.18, Tightening::UB ), + + Tightening( 10, -4.2441, Tightening::LB ), Tightening( 10, 3.8822, Tightening::UB ), + + Tightening( 11, -3.8822, Tightening::LB ), Tightening( 11, 4.2441, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_abs_and_relu_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithAbsAndRelu( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + 
tableau.setUpperBound( 11, large ); +
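// Interval arithmetic maps an absolute-value neuron over [l, u] to [l, u] when l >= 0, to [-u, -l] when u <= 0, and to [0, max( -l, u )] when the interval spans zero (a note; the layer kinds are those set up by populateNetworkWithAbsAndRelu). + // For example, x4 in [-12, 5] in the second pass below yields x5 in [0, 12], where a ReLU would have given [0, 5]. +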
tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List<Tightening> expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -5, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + } ); + + List<Tightening> bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List<Tightening> expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), + + Tightening( 10, -10, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), 
Tightening( 12, 14, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_round_and_sign_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithRoundAndSign( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, 1.4 ); + tableau.setUpperBound( 0, 1.6 ); + tableau.setLowerBound( 1, -1.4 ); + tableau.setUpperBound( 1, 2.1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List<Tightening> expectedBounds( { + Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), + Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), + Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + + Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), + Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), + + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + + Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + } ); + + List<Tightening> bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3.1 ); + tableau.setUpperBound( 0, 1.6 ); + tableau.setLowerBound( 1, 1.4 ); + tableau.setUpperBound( 1, 2.1 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + 
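// A note on the second pass below: Sign saturates to [-1, 1] whenever its input interval spans zero, so although the widened inputs loosen x8 and x10 considerably, x9 and x11 keep the bounds [-1, 1], + // and the output bounds Tightening( 13, -4, LB ) / Tightening( 13, 4, UB ) are unchanged from the first pass. +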
tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List<Tightening> expectedBounds2( { + Tightening( 2, -2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), + Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), + Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), + Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -16, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + + Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_leaky_relu_and_sigmoid_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithLeakyReluAndSigmoid( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List<Tightening> expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), + Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + 
Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), + Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ), + Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), + + Tightening( 9, 0.0573, Tightening::LB ), Tightening( 9, 0.9999, Tightening::UB ), + Tightening( 11, 0.0219, Tightening::LB ), Tightening( 11, 0.9999, Tightening::UB ), + + Tightening( 12, 0.0573, Tightening::LB ), Tightening( 12, 0.9999, Tightening::UB ), + Tightening( 13, 0.1229, Tightening::LB ), Tightening( 13, 3.9996, Tightening::UB ), + } ); + + List<Tightening> bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List<Tightening> expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, -0.2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 7.1, Tightening::UB ), + Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), + + Tightening( 9, 0.0323, Tightening::LB ), Tightening( 9, 0.9992, Tightening::UB ), + Tightening( 11, 0.0392, Tightening::LB ), Tightening( 11, 0.9993, Tightening::UB ), + + Tightening( 12, 0.0323, Tightening::LB ), Tightening( 12, 0.9992, Tightening::UB ), + Tightening( 13, 0.1498, Tightening::LB ), Tightening( 13, 3.9972, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_softmax_and_max_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithSoftmaxAndMax( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 12 ); + + // Initialize the bounds + 
tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); - // With Round/Sign, case 2 - Vector<Vector<double>> simulations2; - simulations2.append( Vector<double>( simulationSize, 1.6 ) ); - simulations2.append( Vector<double>( simulationSize, 1.6 ) ); + nlr.setTableau( &tableau ); - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -4 ) ); - } - } - - void test_simulate_leaky_relu_and_sigmoid() - { - NLR::NetworkLevelReasoner nlr; + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - populateNetworkWithLeakyReluAndSigmoid( nlr ); + List<Tightening> expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 0.0066, Tightening::LB ), Tightening( 3, 0.9517, Tightening::UB ), + Tightening( 5, 0.0007, Tightening::LB ), Tightening( 5, 0.9909, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ), + + Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ), + Tightening( 9, 0.3192, Tightening::LB ), Tightening( 9, 2.9819, Tightening::UB ), + + Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ), + + Tightening( 11, -2.9819, Tightening::LB ), Tightening( 11, -0.3192, Tightening::UB ) + } ); - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + List<Tightening> bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // With LeakyReLU/Sigmoid, case 1 - Vector<Vector<double>> simulations1; - simulations1.append( Vector<double>( simulationSize, 1 ) ); - simulations1.append( Vector<double>( simulationSize, 1 ) ); + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + 
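// In the expectations that follow (assuming, per populateNetworkWithSoftmaxAndMax, that x10 takes the maximum of x8 and x9): with x8 in [-0.9811, 1.9468] and x9 in [0.0654, 2.9933], interval arithmetic gives + // x10 in [ max( l8, l9 ), max( u8, u9 ) ] = [0.0654, 2.9933], and the negated output x11 lands in [-2.9933, -0.0654]. +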
tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.7109, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 1.4602, - 0.0001 ) ); - } + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - // With LeakyReLU/Sigmoid, case 2 - Vector<Vector<double>> simulations2; - simulations2.append( Vector<double>( simulationSize, 1 ) ); - simulations2.append( Vector<double>( simulationSize, 2 ) ); + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + List<Tightening> expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + + Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 9, 0.0654, Tightening::LB ), Tightening( 9, 2.9933, Tightening::UB ), + + Tightening( 10, 0.0654, Tightening::LB ), Tightening( 10, 2.9933, Tightening::UB ), + + Tightening( 11, -2.9933, Tightening::LB ), Tightening( 11, -0.0654, Tightening::UB ) + } ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.4013, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 0.6508, - 0.0001 ) ); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - void test_simulate_softmax_and_max() + void test_interval_arithmetic_bound_propagation_relu_and_bilinear_constraints() { NLR::NetworkLevelReasoner nlr; + populateNetworkWithReluAndBilinear( nlr ); - populateNetworkWithSoftmaxAndMax( nlr ); + MockTableau tableau; + tableau.getBoundManager().initialize( 12 ); - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); - // With LeakyReLU/Sigmoid, case 1 - Vector<Vector<double>> simulations1; - simulations1.append( Vector<double>( simulationSize, 1 ) ); - simulations1.append( Vector<double>( simulationSize, -3 ) ); + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, 
large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + nlr.setTableau( &tableau ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -2.9998, - 0.0001 ) ); - } + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - // With LeakyReLU/Sigmoid, case 2 - Vector<Vector<double>> simulations2; - simulations2.append( Vector<double>( simulationSize, -3 ) ); - simulations2.append( Vector<double>( simulationSize, 3 ) ); + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + List<Tightening> expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -7, Tightening::LB ), Tightening( 10, 49, Tightening::UB ), + + Tightening( 11, -49, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + } ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -1, - 0.0001 ) ); - } - } - - void test_simulate_relu_and_bilinear() - { - NLR::NetworkLevelReasoner nlr; + List<Tightening> bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - populateNetworkWithReluAndBilinear( nlr ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); - // With Relu/Bilinear, case 1 - Vector<Vector<double>> simulations1; - simulations1.append( Vector<double>( simulationSize, 1 ) ); - simulations1.append( Vector<double>( simulationSize, 1 ) ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + 
tableau.setUpperBound( 8, large ); +
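// The bilinear neuron checked below (assuming x10 = x8 * x9, per populateNetworkWithReluAndBilinear) is bounded by the extrema of the four endpoint products: + // with x8 and x9 both in [-2, 7] that is [ min( 4, -14, -14, 49 ), max( 4, -14, -14, 49 ) ] = [-14, 49], matching Tightening( 10, -14, LB ) / Tightening( 10, 49, UB ). +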
tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - } + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - // With ReLU/Bilinear, case 2 - Vector<Vector<double>> simulations2; - simulations2.append( Vector<double>( simulationSize, 1 ) ); - simulations2.append( Vector<double>( simulationSize, 2 ) ); + List<Tightening> expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, -2, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0 ) ); - } + Tightening( 10, -14, Tightening::LB ), Tightening( 10, 49, Tightening::UB ), + + Tightening( 11, -49, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } void test_concretize_input_assignment() @@ -4277,4 +5809,23 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite for ( const auto &bound : expectedBounds ) TS_ASSERT( bounds.exists( bound ) ); } + + bool boundsEqual( const List<Tightening> &bounds, const List<Tightening> &expectedBounds ) + { + if ( bounds.size() != expectedBounds.size() ) + return false; + + bool allFound = true; + for ( const auto &bound : bounds ) + { + bool currentFound = false; + for ( const auto &expectedBound : expectedBounds ) + { + currentFound |= ( bound._type == expectedBound._type && bound._variable == expectedBound._variable && + FloatUtils::areEqual( bound._value, expectedBound._value, 0.0001 ) ); + } + allFound &= currentFound; + } + return allFound; + } }; From ee5dd0065088ea472a93876f479a6ca12f9eaf02 Mon Sep 17 00:00:00 2001 From: ido-shm-uel <ido.shm.uel@gmail.com> Date: Tue, 1 Oct 2024 23:16:12 +0300 Subject: [PATCH 08/81] 0-4-0: Implementing symbolic bound tightening for every activation function. 0-4-0: Implementing symbolic bound tightening for every activation function. --- src/nlr/Layer.cpp | 1316 ++++++++++++++++++++++++++++++++++++++++++++- src/nlr/Layer.h | 18 +- 2 files changed, 1316 insertions(+), 18 deletions(-) diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index 73f787595f..c7f728e860 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -2,7 +2,7 @@ /*! \file Layer.cpp ** \verbatim ** Top contributors (to current version): - ** Guy Katz + ** Guy Katz, Ido Shmuel ** This file is part of the Marabou project. 
** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS ** in the top-level source directory) and their institutional affiliations. @@ -1336,7 +1336,7 @@ void Layer::computeSymbolicBounds() switch ( _type ) { case INPUT: - comptueSymbolicBoundsForInput(); + computeSymbolicBoundsForInput(); break; case WEIGHTED_SUM: @@ -1354,7 +1354,31 @@ void Layer::computeSymbolicBounds() case ABSOLUTE_VALUE: computeSymbolicBoundsForAbsoluteValue(); break; - + + case LEAKY_RELU: + computeSymbolicBoundsForLeakyRelu(); + break; + + case ROUND: + computeSymbolicBoundsForRound(); + break; + + case SIGMOID: + computeSymbolicBoundsForSigmoid(); + break; + + case MAX: + computeSymbolicBoundsForMax(); + break; + + case SOFTMAX: + computeSymbolicBoundsForSoftmax(); + break; + + case BILINEAR: + computeSymbolicBoundsForBilinear(); + break; + default: computeSymbolicBoundsDefault(); break; @@ -1396,7 +1420,7 @@ void Layer::computeSymbolicBoundsDefault() } } -void Layer::comptueSymbolicBoundsForInput() +void Layer::computeSymbolicBoundsForInput() { std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); @@ -1532,19 +1556,19 @@ void Layer::computeSymbolicBoundsForRelu() { for ( unsigned j = 0; j < _inputLayerSize; ++j ) _symbolicLb[j * _size + i] = 0; - + _symbolicLowerBias[i] = 0; } else - { + { for ( unsigned j = 0; j < _inputLayerSize; ++j ) _symbolicLb[j * _size + i] = _symbolicLb[j * _size + i] * _symbolicUbOfLb[i] / ( _symbolicUbOfLb[i] - _symbolicLbOfLb[i] ); - + _symbolicLowerBias[i] = _symbolicLowerBias[i] * _symbolicUbOfLb[i] / ( _symbolicUbOfLb[i] - _symbolicLbOfLb[i] ); } - + _symbolicLbOfLb[i] = 0; } else @@ -1674,6 +1698,9 @@ void Layer::computeSymbolicBoundsForSign() if ( signPhase == PHASE_NOT_FIXED ) { + PhaseStatus upperSignPhase = PHASE_NOT_FIXED; + PhaseStatus lowerSignPhase = PHASE_NOT_FIXED; + // If we got here, we know that lbLb < 0 and ubUb // > 0 @@ -1687,9 +1714,8 @@ void Layer::computeSymbolicBoundsForSign() _symbolicUb[j * _size + i] = 0; _symbolicUpperBias[i] = 1; - - _symbolicUbOfUb[i] = 1; - _symbolicLbOfUb[i] = 1; + + upperSignPhase = SIGN_PHASE_POSITIVE; } else { @@ -1704,9 +1730,6 @@ void Layer::computeSymbolicBoundsForSign() // Do the same for the bias, and then adjust _symbolicUpperBias[i] *= factor; _symbolicUpperBias[i] += 1; - - _symbolicUbOfUb[i] = 1; - _symbolicLbOfUb[i] = -1; } // Lower bound @@ -1720,8 +1743,7 @@ void Layer::computeSymbolicBoundsForSign() _symbolicLowerBias[i] = -1; - _symbolicUbOfLb[i] = -1; - _symbolicLbOfLb[i] = -1; + lowerSignPhase = SIGN_PHASE_NEGATIVE; } else { @@ -1730,15 +1752,36 @@ void Layer::computeSymbolicBoundsForSign() double factor = 2.0 / _symbolicUbOfUb[i]; for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { _symbolicLb[j * _size + i] *= factor; + } // Do the same for the bias, and then adjust _symbolicLowerBias[i] *= factor; _symbolicLowerBias[i] -= 1; - + } + + if ( upperSignPhase == PHASE_NOT_FIXED ) + { + _symbolicUbOfUb[i] = 1; + _symbolicLbOfUb[i] = -1; + } + else + { + _symbolicUbOfUb[i] = 1; + _symbolicLbOfUb[i] = 1; + } + + if ( lowerSignPhase == PHASE_NOT_FIXED ) + { _symbolicUbOfLb[i] = 1; + _symbolicLbOfLb[i] = -1; } + else + { + _symbolicUbOfLb[i] = -1; + _symbolicLbOfLb[i] = -1; + } } else { @@ -1918,6 +1961,1241 @@ void Layer::computeSymbolicBoundsForAbsoluteValue() } } +void Layer::computeSymbolicBoundsForLeakyRelu() +{ + ASSERT( _alpha > 0 && _alpha < 1 ); + + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); + std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + { + _symbolicLowerBias[i] = _eliminatedNeurons[i]; + _symbolicUpperBias[i] = _eliminatedNeurons[i]; + + _symbolicLbOfLb[i] = _eliminatedNeurons[i]; + _symbolicUbOfLb[i] = _eliminatedNeurons[i]; + _symbolicLbOfUb[i] = _eliminatedNeurons[i]; + _symbolicUbOfUb[i] = _eliminatedNeurons[i]; + } + } + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + /* + There are two ways we can determine that a LeakyReLU has become fixed: + + 1. If the LeakyReLU's variable has been externally fixed + 2. lbLb >= 0 (ACTIVE) or ubUb <= 0 (INACTIVE) + */ + PhaseStatus leakyReluPhase = PHASE_NOT_FIXED; + + // Has the f variable been eliminated or fixed?
+        if ( FloatUtils::isPositive( _lb[i] ) )
+            leakyReluPhase = RELU_PHASE_ACTIVE;
+        else if ( FloatUtils::isZero( _ub[i] ) )
+            leakyReluPhase = RELU_PHASE_INACTIVE;
+
+        ASSERT( _neuronToActivationSources.exists( i ) );
+        NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin();
+        const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
+
+        /*
+          A LeakyReLU initially "inherits" the symbolic bounds computed
+          for its input variable
+        */
+        unsigned sourceLayerSize = sourceLayer->getSize();
+        const double *sourceSymbolicLb = sourceLayer->getSymbolicLb();
+        const double *sourceSymbolicUb = sourceLayer->getSymbolicUb();
+
+        for ( unsigned j = 0; j < _inputLayerSize; ++j )
+        {
+            _symbolicLb[j * _size + i] =
+                sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron];
+            _symbolicUb[j * _size + i] =
+                sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron];
+        }
+        _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron];
+        _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron];
+
+        double sourceLb = sourceLayer->getLb( sourceIndex._neuron );
+        double sourceUb = sourceLayer->getUb( sourceIndex._neuron );
+
+        _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron );
+        _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron );
+        _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron );
+        _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron );
+
+        // Has the b variable been fixed?
+        if ( !FloatUtils::isNegative( sourceLb ) )
+        {
+            leakyReluPhase = RELU_PHASE_ACTIVE;
+        }
+        else if ( !FloatUtils::isPositive( sourceUb ) )
+        {
+            leakyReluPhase = RELU_PHASE_INACTIVE;
+        }
+
+        if ( leakyReluPhase == PHASE_NOT_FIXED )
+        {
+            // LeakyReLU not fixed
+            // Symbolic upper bound:
+            //   x_f <= ( ( u - _alpha * l ) * x_b + ( _alpha - 1 ) * u * l ) / ( u - l )
+            // Concrete upper bound: x_f <= ub_b
+            double width = sourceUb - sourceLb;
+            double coeff = ( sourceUb - _alpha * sourceLb ) / width;
+
+            if ( _alpha <= 1 )
+            {
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                {
+                    _symbolicUb[j * _size + i] *= coeff;
+                }
+
+                // Do the same for the bias, and then adjust
+                _symbolicUpperBias[i] *= coeff;
+                _symbolicUpperBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width;
+
+
+                // For the lower bound, in general, x_f >= lambda * x_b, where
+                // _alpha <= lambda <= 1, would be a sound lower bound. We
+                // use the heuristic described in section 4.1 of
+                // https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf
+                // to set the value of lambda (either _alpha or 1 is considered).
+                if ( sourceUb > -sourceLb )
+                {
+                    // lambda = 1
+                    // Symbolic lower bound: x_f >= x_b
+                    // Concrete lower bound: x_f >= sourceLb
+
+                    // Lower bounds are passed as is
+                }
+                else
+                {
+                    // lambda = _alpha
+                    // Symbolic lower bound: x_f >= _alpha * x_b
+                    // Concrete lower bound: x_f >= _alpha * sourceLb
+
+                    for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                    {
+                        _symbolicLb[j * _size + i] *= _alpha;
+                    }
+
+                    _symbolicLowerBias[i] *= _alpha;
+                }
+            }
+            else
+            {
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                {
+                    _symbolicLb[j * _size + i] *= coeff;
+                }
+
+                // Do the same for the bias, and then adjust
+                _symbolicLowerBias[i] *= coeff;
+                _symbolicLowerBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width;
+
+                if ( sourceUb > -sourceLb )
+                {
+                    // Upper bounds are passed as is
+                }
+                else
+                {
+                    for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                    {
+                        _symbolicUb[j * _size + i] *= _alpha;
+                    }
+
+                    _symbolicUpperBias[i] *= _alpha;
+                }
+            }
+
+            /*
+              We now have the symbolic representation for the current
+              layer. Next, we compute new lower and upper bounds for
+              it. For each of these bounds, we compute an upper bound and
+              a lower bound.
+            */
+            _symbolicLbOfLb[i] = _symbolicLowerBias[i];
+            _symbolicUbOfLb[i] = _symbolicLowerBias[i];
+            _symbolicLbOfUb[i] = _symbolicUpperBias[i];
+            _symbolicUbOfUb[i] = _symbolicUpperBias[i];
+
+            for ( unsigned j = 0; j < _inputLayerSize; ++j )
+            {
+                double inputLb = _layerOwner->getLayer( 0 )->getLb( j );
+                double inputUb = _layerOwner->getLayer( 0 )->getUb( j );
+
+                double entry = _symbolicLb[j * _size + i];
+
+                if ( entry >= 0 )
+                {
+                    _symbolicLbOfLb[i] += ( entry * inputLb );
+                    _symbolicUbOfLb[i] += ( entry * inputUb );
+                }
+                else
+                {
+                    _symbolicLbOfLb[i] += ( entry * inputUb );
+                    _symbolicUbOfLb[i] += ( entry * inputLb );
+                }
+
+                entry = _symbolicUb[j * _size + i];
+
+                if ( entry >= 0 )
+                {
+                    _symbolicLbOfUb[i] += ( entry * inputLb );
+                    _symbolicUbOfUb[i] += ( entry * inputUb );
+                }
+                else
+                {
+                    _symbolicLbOfUb[i] += ( entry * inputUb );
+                    _symbolicUbOfUb[i] += ( entry * inputLb );
+                }
+            }
+        }
+        else
+        {
+            // The phase of this LeakyReLU is fixed!
+            if ( leakyReluPhase == RELU_PHASE_ACTIVE )
+            {
+                // Positive LeakyReLU, bounds are propagated as is
+            }
+            else
+            {
+                // Negative LeakyReLU, bounds are multiplied by _alpha
+                _symbolicLbOfLb[i] *= _alpha;
+                _symbolicUbOfLb[i] *= _alpha;
+                _symbolicLbOfUb[i] *= _alpha;
+                _symbolicUbOfUb[i] *= _alpha;
+
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                {
+                    _symbolicUb[j * _size + i] *= _alpha;
+                    _symbolicLb[j * _size + i] *= _alpha;
+                }
+
+                _symbolicLowerBias[i] *= _alpha;
+                _symbolicUpperBias[i] *= _alpha;
+            }
+        }
+
+        if ( _symbolicUbOfUb[i] > sourceUb )
+            _symbolicUbOfUb[i] = sourceUb;
+        if ( _symbolicLbOfLb[i] < _alpha * sourceLb )
+            _symbolicLbOfLb[i] = _alpha * sourceLb;
+
+        /*
+          We now have the tightest bounds we can for the LeakyReLU
+          variable. If they are tighter than what was previously
+          known, store them.
+        */
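+        // E.g. (hypothetical): if _lb[i] = -2 while _symbolicLbOfLb[i] = -0.2,
+        // the symbolic pass has tightened the lower bound, and the new bound
+        // is reported to the engine below as a Tightening.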
+        if ( _lb[i] < _symbolicLbOfLb[i] )
+        {
+            _lb[i] = _symbolicLbOfLb[i];
+            _layerOwner->receiveTighterBound(
+                Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
+        }
+
+        if ( _ub[i] > _symbolicUbOfUb[i] )
+        {
+            _ub[i] = _symbolicUbOfUb[i];
+            _layerOwner->receiveTighterBound(
+                Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
+        }
+    }
+}
+
+
+void Layer::computeSymbolicBoundsForSigmoid()
+{
+    std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 );
+    std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 );
+
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+        {
+            _symbolicLowerBias[i] = _eliminatedNeurons[i];
+            _symbolicUpperBias[i] = _eliminatedNeurons[i];
+
+            _symbolicLbOfLb[i] = _eliminatedNeurons[i];
+            _symbolicUbOfLb[i] = _eliminatedNeurons[i];
+            _symbolicLbOfUb[i] = _eliminatedNeurons[i];
+            _symbolicUbOfUb[i] = _eliminatedNeurons[i];
+        }
+    }
+
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+            continue;
+
+        ASSERT( _neuronToActivationSources.exists( i ) );
+        NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin();
+        const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
+
+        /*
+          A Sigmoid initially "inherits" the symbolic bounds computed
+          for its input variable
+        */
+        unsigned sourceLayerSize = sourceLayer->getSize();
+        const double *sourceSymbolicLb = sourceLayer->getSymbolicLb();
+        const double *sourceSymbolicUb = sourceLayer->getSymbolicUb();
+
+        for ( unsigned j = 0; j < _inputLayerSize; ++j )
+        {
+            _symbolicLb[j * _size + i] =
+                sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron];
+            _symbolicUb[j * _size + i] =
+                sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron];
+        }
+        _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron];
+        _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron];
+
+        double sourceLb = sourceLayer->getLb( sourceIndex._neuron );
+        double sourceUb = sourceLayer->getUb( sourceIndex._neuron );
+
+        _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron );
+        _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron );
+        _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron );
+        _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron );
+
+        // Bounds of lb, ub are the Sigmoids of source lb, ub
+        double sourceUbSigmoid = SigmoidConstraint::sigmoid( sourceUb );
+        double sourceLbSigmoid = SigmoidConstraint::sigmoid( sourceLb );
+
+        // Case when the Sigmoid constraint is fixed
+        if ( FloatUtils::areEqual( sourceUb, sourceLb ) )
+        {
+            for ( unsigned j = 0; j < _inputLayerSize; ++j )
+            {
+                _symbolicLb[j * _size + i] = 0;
+                _symbolicUb[j * _size + i] = 0;
+            }
+
+            _symbolicLbOfUb[i] = sourceUbSigmoid;
+            _symbolicUbOfUb[i] = sourceUbSigmoid;
+            _symbolicLbOfLb[i] = sourceLbSigmoid;
+            _symbolicUbOfLb[i] = sourceLbSigmoid;
+
+            _symbolicUpperBias[i] = sourceUbSigmoid;
+            _symbolicLowerBias[i] = sourceLbSigmoid;
+        }
+
+        // Sigmoid not fixed
+        else
+        {
+            double lambda = ( sourceUbSigmoid - sourceLbSigmoid ) / ( sourceUb - sourceLb );
+            double lambdaPrime = std::min( SigmoidConstraint::sigmoidDerivative( sourceLb ),
+                                           SigmoidConstraint::sigmoidDerivative( sourceUb ) );
+
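+            // A worked example (hypothetical bounds): for sourceLb = -1 and
+            // sourceUb = 1, lambda = ( sigmoid( 1 ) - sigmoid( -1 ) ) / 2
+            // ~= 0.2311 (the secant slope), and lambdaPrime = sigmoid'( 1 )
+            // = sigmoid'( -1 ) ~= 0.1966, the tangent slope used on the side
+            // where the secant would not be sound.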
+            // update lower bound
+            if ( FloatUtils::isPositive( sourceLb ) )
+            {
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                {
+                    _symbolicLb[j * _size + i] *= lambda;
+                }
+
+                // Do the same for the bias, and then adjust
+                _symbolicLowerBias[i] *= lambda;
+                _symbolicLowerBias[i] += sourceLbSigmoid - lambda * sourceLb;
+            }
+            else
+            {
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                {
+                    _symbolicLb[j * _size + i] *= lambdaPrime;
+                }
+
+                // Do the same for the bias, and then adjust
+                _symbolicLowerBias[i] *= lambdaPrime;
+                _symbolicLowerBias[i] += sourceLbSigmoid - lambdaPrime * sourceLb;
+            }
+
+            // update upper bound
+            if ( !FloatUtils::isPositive( sourceUb ) )
+            {
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                {
+                    _symbolicUb[j * _size + i] *= lambda;
+                }
+
+                // Do the same for the bias, and then adjust
+                _symbolicUpperBias[i] *= lambda;
+                _symbolicUpperBias[i] += sourceUbSigmoid - lambda * sourceUb;
+            }
+            else
+            {
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                {
+                    _symbolicUb[j * _size + i] *= lambdaPrime;
+                }
+
+                // Do the same for the bias, and then adjust
+                _symbolicUpperBias[i] *= lambdaPrime;
+                _symbolicUpperBias[i] += sourceUbSigmoid - lambdaPrime * sourceUb;
+            }
+
+
+            /*
+              We now have the symbolic representation for the current
+              layer. Next, we compute new lower and upper bounds for
+              it. For each of these bounds, we compute an upper bound and
+              a lower bound.
+            */
+            _symbolicLbOfLb[i] = _symbolicLowerBias[i];
+            _symbolicUbOfLb[i] = _symbolicLowerBias[i];
+            _symbolicLbOfUb[i] = _symbolicUpperBias[i];
+            _symbolicUbOfUb[i] = _symbolicUpperBias[i];
+
+            for ( unsigned j = 0; j < _inputLayerSize; ++j )
+            {
+                double inputLb = _layerOwner->getLayer( 0 )->getLb( j );
+                double inputUb = _layerOwner->getLayer( 0 )->getUb( j );
+
+                double entry = _symbolicLb[j * _size + i];
+
+                if ( entry >= 0 )
+                {
+                    _symbolicLbOfLb[i] += ( entry * inputLb );
+                    _symbolicUbOfLb[i] += ( entry * inputUb );
+                }
+                else
+                {
+                    _symbolicLbOfLb[i] += ( entry * inputUb );
+                    _symbolicUbOfLb[i] += ( entry * inputLb );
+                }
+
+                entry = _symbolicUb[j * _size + i];
+
+                if ( entry >= 0 )
+                {
+                    _symbolicLbOfUb[i] += ( entry * inputLb );
+                    _symbolicUbOfUb[i] += ( entry * inputUb );
+                }
+                else
+                {
+                    _symbolicLbOfUb[i] += ( entry * inputUb );
+                    _symbolicUbOfUb[i] += ( entry * inputLb );
+                }
+            }
+        }
+
+        // A Sigmoid's output always lies in ( 0, 1 )
+        if ( _symbolicLbOfLb[i] < 0 )
+            _symbolicLbOfLb[i] = 0;
+        if ( _symbolicUbOfUb[i] > 1 )
+            _symbolicUbOfUb[i] = 1;
+
+        /*
+          We now have the tightest bounds we can for the Sigmoid
+          variable. If they are tighter than what was previously
+          known, store them.
+        */
+        if ( _lb[i] < _symbolicLbOfLb[i] )
+        {
+            _lb[i] = _symbolicLbOfLb[i];
+            _layerOwner->receiveTighterBound(
+                Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
+        }
+
+        if ( _ub[i] > _symbolicUbOfUb[i] )
+        {
+            _ub[i] = _symbolicUbOfUb[i];
+            _layerOwner->receiveTighterBound(
+                Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
+        }
+    }
+}
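+
+/*
+  Round propagates the source's symbolic bounds shifted by +/- 0.5, and its
+  concrete bounds are the rounded source bounds. For example (hypothetical
+  bounds): a source in [0.2, 1.4] yields x_f in [0, 1], with
+  x_b - 0.5 <= x_f <= x_b + 0.5.
+*/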
+void Layer::computeSymbolicBoundsForRound()
+{
+    std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 );
+    std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 );
+
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+        {
+            _symbolicLowerBias[i] = _eliminatedNeurons[i];
+            _symbolicUpperBias[i] = _eliminatedNeurons[i];
+
+            _symbolicLbOfLb[i] = _eliminatedNeurons[i];
+            _symbolicUbOfLb[i] = _eliminatedNeurons[i];
+            _symbolicLbOfUb[i] = _eliminatedNeurons[i];
+            _symbolicUbOfUb[i] = _eliminatedNeurons[i];
+        }
+    }
+
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+            continue;
+
+        ASSERT( _neuronToActivationSources.exists( i ) );
+        NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin();
+        const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
+
+        /*
+          A Round initially "inherits" the symbolic bounds computed
+          for its input variable
+        */
+        unsigned sourceLayerSize = sourceLayer->getSize();
+        const double *sourceSymbolicLb = sourceLayer->getSymbolicLb();
+        const double *sourceSymbolicUb = sourceLayer->getSymbolicUb();
+
+        for ( unsigned j = 0; j < _inputLayerSize; ++j )
+        {
+            _symbolicLb[j * _size + i] =
+                sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron];
+            _symbolicUb[j * _size + i] =
+                sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron];
+        }
+        _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron];
+        _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron];
+
+        double sourceLb = sourceLayer->getLb( sourceIndex._neuron );
+        double sourceUb = sourceLayer->getUb( sourceIndex._neuron );
+
+        _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron );
+        _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron );
+        _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron );
+        _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron );
+
+
+        // Bounds of lb, ub are the rounded values of source lb, ub
+        double sourceUbRound = FloatUtils::round( sourceUb );
+        double sourceLbRound = FloatUtils::round( sourceLb );
+
+        _symbolicLbOfUb[i] = sourceUbRound;
+        _symbolicUbOfUb[i] = sourceUbRound;
+        _symbolicLbOfLb[i] = sourceLbRound;
+        _symbolicUbOfLb[i] = sourceLbRound;
+
+
+        // Case when the Round constraint is fixed
+        if ( FloatUtils::areEqual( sourceUbRound, sourceLbRound ) )
+        {
+            for ( unsigned j = 0; j < _inputLayerSize; ++j )
+            {
+                _symbolicUb[j * _size + i] = 0;
+                _symbolicLb[j * _size + i] = 0;
+            }
+
+            _symbolicUpperBias[i] = sourceUbRound;
+            _symbolicLowerBias[i] = sourceLbRound;
+        }
+
+        // Round not fixed
+        else
+        {
+            // Symbolic upper bound: x_f <= x_b + 0.5
+            // Concrete upper bound: x_f <= round( ub_b )
+            _symbolicUpperBias[i] += 0.5;
+
+            // Symbolic lower bound: x_f >= x_b - 0.5
+            // Concrete lower bound: x_f >= round( lb_b )
+            _symbolicLowerBias[i] -= 0.5;
+        }
+
+        /*
+          We now have the tightest bounds we can for the Round
+          variable. If they are tighter than what was previously
+          known, store them.
+        */
+        if ( _lb[i] < _symbolicLbOfLb[i] )
+        {
+            _lb[i] = _symbolicLbOfLb[i];
+            _layerOwner->receiveTighterBound(
+                Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
+        }
+
+        if ( _ub[i] > _symbolicUbOfUb[i] )
+        {
+            _ub[i] = _symbolicUbOfUb[i];
+            _layerOwner->receiveTighterBound(
+                Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
+        }
+    }
+}
+
+void Layer::computeSymbolicBoundsForMax()
+{
+    std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 );
+    std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 );
+
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+        {
+            _symbolicLowerBias[i] = _eliminatedNeurons[i];
+            _symbolicUpperBias[i] = _eliminatedNeurons[i];
+
+            _symbolicLbOfLb[i] = _eliminatedNeurons[i];
+            _symbolicUbOfLb[i] = _eliminatedNeurons[i];
+            _symbolicLbOfUb[i] = _eliminatedNeurons[i];
+            _symbolicUbOfUb[i] = _eliminatedNeurons[i];
+        }
+    }
+
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+            continue;
+
+        ASSERT( _neuronToActivationSources.exists( i ) );
+        List<NeuronIndex> sources = getActivationSources( i );
+        const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer );
+
+        unsigned sourceLayerSize = sourceLayer->getSize();
+        const double *sourceSymbolicLb = sourceLayer->getSymbolicLb();
+        const double *sourceSymbolicUb = sourceLayer->getSymbolicUb();
+
+        NeuronIndex indexOfMaxLowerBound = *( sources.begin() );
+        double maxLowerBound = FloatUtils::negativeInfinity();
+        double maxUpperBound = FloatUtils::negativeInfinity();
+
+        Map<NeuronIndex, double> sourceLbs;
+        Map<NeuronIndex, double> sourceUbs;
+        for ( const auto &sourceIndex : sources )
+        {
+            unsigned sourceNeuron = sourceIndex._neuron;
+            double sourceLb = sourceLayer->getLb( sourceNeuron );
+            double sourceUb = sourceLayer->getUb( sourceNeuron );
+
+            sourceLbs[sourceIndex] = sourceLb;
+            sourceUbs[sourceIndex] = sourceUb;
+
+            if ( maxLowerBound < sourceLb )
+            {
+                indexOfMaxLowerBound = sourceIndex;
+                maxLowerBound = sourceLb;
+            }
+            if ( maxUpperBound < sourceUb )
+            {
+                maxUpperBound = sourceUb;
+            }
+        }
+
+        // The phase is fixed if the lower-bound of a source variable x_b is
+        // larger than the upper-bounds of the other source variables.
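+        // For example (hypothetical bounds): with sources bounded by [1, 2],
+        // [-1, 0.5] and [0, 0.8], the first source's lower bound 1 exceeds
+        // the upper bounds 0.5 and 0.8 of the others, so the Max is fixed
+        // to the first source.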
+        bool phaseFixed = true;
+        for ( const auto &sourceIndex : sources )
+        {
+            if ( sourceIndex != indexOfMaxLowerBound &&
+                 FloatUtils::gt( sourceUbs[sourceIndex], maxLowerBound ) )
+            {
+                phaseFixed = false;
+                break;
+            }
+        }
+
+        if ( phaseFixed )
+        {
+            // Phase fixed
+            // Symbolic bound: x_b <= x_f <= x_b
+            // Concrete bound: lb_b <= x_f <= ub_b
+            for ( unsigned j = 0; j < _inputLayerSize; ++j )
+            {
+                _symbolicLb[j * _size + i] =
+                    sourceSymbolicLb[j * sourceLayerSize + indexOfMaxLowerBound._neuron];
+                _symbolicUb[j * _size + i] =
+                    sourceSymbolicUb[j * sourceLayerSize + indexOfMaxLowerBound._neuron];
+            }
+            _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[indexOfMaxLowerBound._neuron];
+            _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[indexOfMaxLowerBound._neuron];
+
+
+            _symbolicLbOfLb[i] = maxLowerBound;
+            _symbolicUbOfLb[i] = maxLowerBound;
+            _symbolicLbOfUb[i] = sourceUbs[indexOfMaxLowerBound];
+            _symbolicUbOfUb[i] = sourceUbs[indexOfMaxLowerBound];
+        }
+        else
+        {
+            // MaxPool not fixed
+            // Symbolic bounds: x_b <= x_f <= maxUpperBound
+            // Concrete bounds: lb_b <= x_f <= maxUpperBound
+            for ( unsigned j = 0; j < _inputLayerSize; ++j )
+            {
+                _symbolicLb[j * _size + i] =
+                    sourceSymbolicLb[j * sourceLayerSize + indexOfMaxLowerBound._neuron];
+                _symbolicUb[j * _size + i] = 0;
+            }
+            _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[indexOfMaxLowerBound._neuron];
+            _symbolicUpperBias[i] = maxUpperBound;
+
+            _symbolicLbOfLb[i] = maxLowerBound;
+            _symbolicUbOfLb[i] = maxLowerBound;
+            _symbolicLbOfUb[i] = maxUpperBound;
+            _symbolicUbOfUb[i] = maxUpperBound;
+        }
+
+        /*
+          We now have the tightest bounds we can for the Max
+          variable. If they are tighter than what was previously
+          known, store them.
+        */
+        if ( _lb[i] < _symbolicLbOfLb[i] )
+        {
+            _lb[i] = _symbolicLbOfLb[i];
+            _layerOwner->receiveTighterBound(
+                Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
+        }
+
+        if ( _ub[i] > _symbolicUbOfUb[i] )
+        {
+            _ub[i] = _symbolicUbOfUb[i];
+            _layerOwner->receiveTighterBound(
+                Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
+        }
+    }
+}
+
+void Layer::computeSymbolicBoundsForSoftmax()
+{
+    std::fill_n( _symbolicLowerBias, _size, 0 );
+    std::fill_n( _symbolicUpperBias, _size, 0 );
+
+    double *symbolicLb = new double[_size * _size];
+    double *symbolicUb = new double[_size * _size];
+    std::fill_n( symbolicLb, _size * _size, 0 );
+    std::fill_n( symbolicUb, _size * _size, 0 );
+
+    double *_work = new double[_size * _size];
+    std::fill_n( _work, _size * _size, 0 );
+
+    Set<unsigned> handledInputNeurons;
+    unsigned sourceLayerSize = _size;
+    SoftmaxBoundType boundType = Options::get()->getSoftmaxBoundType();
+
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+        {
+            _symbolicLowerBias[i] = _eliminatedNeurons[i];
+            _symbolicUpperBias[i] = _eliminatedNeurons[i];
+
+            _symbolicLbOfLb[i] = _eliminatedNeurons[i];
+            _symbolicUbOfLb[i] = _eliminatedNeurons[i];
+            _symbolicLbOfUb[i] = _eliminatedNeurons[i];
+            _symbolicUbOfUb[i] = _eliminatedNeurons[i];
+        }
+    }
+
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+            continue;
+
+        ASSERT( _neuronToActivationSources.exists( i ) );
+        List<NeuronIndex> sources = getActivationSources( i );
+        const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer );
+
+        sourceLayerSize = sourceLayer->getSize();
+        ASSERT( sourceLayerSize == _size );
+
+        Vector<double> sourceLbs;
+        Vector<double> sourceUbs;
+        Vector<double> sourceMids;
+        Vector<double> targetLbs;
+        Vector<double> targetUbs;
+        unsigned len = 0;
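+        // The loop below collects the concrete source bounds, widened by a
+        // small epsilon for numerical safety, and their midpoints; the
+        // midpoints serve as the linearization points of the LSE / ER bounds.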
+ for ( const auto &sourceIndex : sources ) + { + unsigned sourceNeuron = sourceIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + + sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + sourceMids.append( ( sourceLb + sourceUb ) / 2 ); + targetLbs.append( _lb[i] ); + targetUbs.append( _ub[i] ); + + ++len; + } + + // Find the index of i in the softmax + unsigned index = 0; + for ( const auto &sourceIndex : sources ) + { + if ( handledInputNeurons.exists( sourceIndex._neuron ) ) + ++index; + else + { + handledInputNeurons.insert( sourceIndex._neuron ); + break; + } + } + + double lb = softmaxLinearLowerBound( sourceLbs, sourceUbs, index ); + double ub = softmaxLinearUpperBound( sourceLbs, sourceUbs, index ); + if ( _lb[i] < lb ) + { + _lb[i] = lb; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + if ( _ub[i] > ub ) + { + _ub[i] = ub; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + targetLbs[index] = _lb[i]; + targetUbs[index] = _ub[i]; + + if ( FloatUtils::areEqual( _lb[i], _ub[i] ) ) + { + _symbolicLowerBias[i] = _lb[i]; + _symbolicUpperBias[i] = _ub[i]; + for ( const auto &sourceIndex : sources ) + { + symbolicLb[len * sourceIndex._neuron + i] = 0; + symbolicUb[len * sourceIndex._neuron + i] = 0; + } + } + else + { + // Compute symbolic bound + if ( boundType == SoftmaxBoundType::LOG_SUM_EXP_DECOMPOSITION ) + { + bool useLSE2 = false; + for ( const auto &lb : targetLbs ) + { + if ( lb > GlobalConfiguration::SOFTMAX_LSE2_THRESHOLD ) + useLSE2 = true; + } + unsigned inputIndex = 0; + if ( !useLSE2 ) + { + _symbolicLowerBias[i] = + softmaxLSELowerBound( sourceMids, sourceLbs, sourceUbs, index ); + for ( const auto &sourceIndex : sources ) + { + double dldj = + softmaxdLSELowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + symbolicLb[len * sourceIndex._neuron + i] = dldj; + _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; + ++inputIndex; + } + } + else + { + _symbolicLowerBias[i] = + softmaxLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index ); + for ( const auto &sourceIndex : sources ) + { + double dldj = + softmaxdLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + symbolicLb[len * sourceIndex._neuron + i] = dldj; + _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; + ++inputIndex; + } + } + + _symbolicUpperBias[i] = softmaxLSEUpperBound( sourceMids, targetLbs, targetUbs, index ); + inputIndex = 0; + for ( const auto &sourceIndex : sources ) + { + double dudj = + softmaxdLSEUpperbound( sourceMids, targetLbs, targetUbs, index, inputIndex ); + symbolicUb[len * sourceIndex._neuron + i] = dudj; + _symbolicUpperBias[i] -= dudj * sourceMids[inputIndex]; + ++inputIndex; + } + } + else if ( boundType == SoftmaxBoundType::EXPONENTIAL_RECIPROCAL_DECOMPOSITION ) + { + _symbolicLowerBias[i] = softmaxERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); + unsigned inputIndex = 0; + for ( const auto &sourceIndex : sources ) + { + double dldj = + softmaxdERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + symbolicLb[len * sourceIndex._neuron + i] = dldj; + _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; + ++inputIndex; + } + + _symbolicUpperBias[i] = softmaxERUpperBound( sourceMids, targetLbs, targetUbs, index ); + 
inputIndex = 0; + for ( const auto &sourceIndex : sources ) + { + double dudj = + softmaxdERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex ); + symbolicUb[len * sourceIndex._neuron + i] = dudj; + _symbolicUpperBias[i] -= dudj * sourceMids[inputIndex]; + ++inputIndex; + } + } + } + } + + for ( const auto &sourceLayerEntry : _sourceLayers ) + { + const Layer *sourceLayer = _layerOwner->getLayer( sourceLayerEntry.first ); + + /* + Perform the multiplication + + newUB = oldUB * posWeights + oldLB * negWeights + newLB = oldUB * negWeights + oldLB * posWeights + */ + + for ( unsigned i = 0; i < sourceLayerSize * _size; ++i ) + { + if ( symbolicLb[i] > 0 ) + _work[i] = symbolicLb[i]; + else + _work[i] = 0; + } + // _work is now positive weights in symbolicLb + matrixMultiplication( sourceLayer->getSymbolicLb(), + _work, + _symbolicLb, + _inputLayerSize, + sourceLayerSize, + _size ); + if ( sourceLayer->getSymbolicLowerBias() ) + matrixMultiplication( + sourceLayer->getSymbolicLowerBias(), _work, _symbolicLowerBias, 1, sourceLayerSize, _size ); + + for ( unsigned i = 0; i < sourceLayerSize * _size; ++i ) + { + if ( symbolicLb[i] < 0 ) + _work[i] = symbolicLb[i]; + else + _work[i] = 0; + } + // _work is now negative weights in symbolicLb + matrixMultiplication( sourceLayer->getSymbolicUb(), + _work, + _symbolicLb, + _inputLayerSize, + sourceLayerSize, + _size ); + if ( sourceLayer->getSymbolicLowerBias() ) + matrixMultiplication( + sourceLayer->getSymbolicUpperBias(), _work, _symbolicLowerBias, 1, sourceLayerSize, _size ); + + for ( unsigned i = 0; i < sourceLayerSize * _size; ++i ) + { + if ( symbolicUb[i] > 0 ) + _work[i] = symbolicUb[i]; + else + _work[i] = 0; + } + // _work is now positive weights in symbolicUb + matrixMultiplication( sourceLayer->getSymbolicUb(), + _work, + _symbolicUb, + _inputLayerSize, + sourceLayerSize, + _size ); + if ( sourceLayer->getSymbolicUpperBias() ) + matrixMultiplication( + sourceLayer->getSymbolicUpperBias(), _work, _symbolicUpperBias, 1, sourceLayerSize, _size ); + + for ( unsigned i = 0; i < sourceLayerSize * _size; ++i ) + { + if ( symbolicUb[i] < 0 ) + _work[i] = symbolicUb[i]; + else + _work[i] = 0; + } + // _work is now negative weights in symbolicUb + matrixMultiplication( sourceLayer->getSymbolicLb(), + _work, + _symbolicUb, + _inputLayerSize, + sourceLayerSize, + _size ); + if ( sourceLayer->getSymbolicUpperBias() ) + matrixMultiplication( + sourceLayer->getSymbolicLowerBias(), _work, _symbolicUpperBias, 1, sourceLayerSize, _size ); + } + + /* + We now have the symbolic representation for the current + layer. Next, we compute new lower and upper bounds for + it. For each of these bounds, we compute an upper bound and + a lower bound. 
+    */
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+            continue;
+
+        _symbolicLbOfLb[i] = _symbolicLowerBias[i];
+        _symbolicUbOfLb[i] = _symbolicLowerBias[i];
+        _symbolicLbOfUb[i] = _symbolicUpperBias[i];
+        _symbolicUbOfUb[i] = _symbolicUpperBias[i];
+
+        for ( unsigned j = 0; j < _inputLayerSize; ++j )
+        {
+            double inputLb = _layerOwner->getLayer( 0 )->getLb( j );
+            double inputUb = _layerOwner->getLayer( 0 )->getUb( j );
+
+            double entry = _symbolicLb[j * _size + i];
+
+            if ( entry >= 0 )
+            {
+                _symbolicLbOfLb[i] += ( entry * inputLb );
+                _symbolicUbOfLb[i] += ( entry * inputUb );
+            }
+            else
+            {
+                _symbolicLbOfLb[i] += ( entry * inputUb );
+                _symbolicUbOfLb[i] += ( entry * inputLb );
+            }
+
+            entry = _symbolicUb[j * _size + i];
+
+            if ( entry >= 0 )
+            {
+                _symbolicLbOfUb[i] += ( entry * inputLb );
+                _symbolicUbOfUb[i] += ( entry * inputUb );
+            }
+            else
+            {
+                _symbolicLbOfUb[i] += ( entry * inputUb );
+                _symbolicUbOfUb[i] += ( entry * inputLb );
+            }
+        }
+    }
+
+    if ( symbolicLb )
+    {
+        delete[] symbolicLb;
+        symbolicLb = NULL;
+    }
+    if ( symbolicUb )
+    {
+        delete[] symbolicUb;
+        symbolicUb = NULL;
+    }
+    if ( _work )
+    {
+        delete[] _work;
+        _work = NULL;
+    }
+}
+
+void Layer::computeSymbolicBoundsForBilinear()
+{
+    std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 );
+    std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 );
+
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+        {
+            _symbolicLowerBias[i] = _eliminatedNeurons[i];
+            _symbolicUpperBias[i] = _eliminatedNeurons[i];
+
+            _symbolicLbOfLb[i] = _eliminatedNeurons[i];
+            _symbolicUbOfLb[i] = _eliminatedNeurons[i];
+            _symbolicLbOfUb[i] = _eliminatedNeurons[i];
+            _symbolicUbOfUb[i] = _eliminatedNeurons[i];
+        }
+    }
+
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+            continue;
+
+        ASSERT( _neuronToActivationSources.exists( i ) );
+        List<NeuronIndex> sources = getActivationSources( i );
+        ASSERT( sources.size() == 2 );
+
+        const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer );
+
+        unsigned sourceLayerSize = sourceLayer->getSize();
+        const double *sourceSymbolicLb = sourceLayer->getSymbolicLb();
+        const double *sourceSymbolicUb = sourceLayer->getSymbolicUb();
+
+        Vector<double> sourceLbs;
+        Vector<double> sourceUbs;
+        Vector<double> sourceValues;
+        bool allConstant = true;
+        unsigned indexA = 0;
+        unsigned indexB = 0;
+        unsigned counter = 0;
+        for ( const auto &sourceIndex : sources )
+        {
+            unsigned sourceNeuron = sourceIndex._neuron;
+            double sourceLb = sourceLayer->getLb( sourceNeuron );
+            double sourceUb = sourceLayer->getUb( sourceNeuron );
+
+            sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS );
+            sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS );
+
+            if ( !sourceLayer->neuronEliminated( sourceNeuron ) )
+            {
+                allConstant = false;
+            }
+            else
+            {
+                double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron );
+                sourceValues.append( sourceValue );
+            }
+
+            if ( counter == 0 )
+            {
+                indexA = sourceIndex._neuron;
+            }
+            else
+            {
+                indexB = sourceIndex._neuron;
+            }
+            ++counter;
+        }
+
+        if ( allConstant )
+        {
+            // If both source neurons have been eliminated, this neuron is constant
+            for ( unsigned j = 0; j < _inputLayerSize; ++j )
+            {
+                _symbolicUb[j * _size + i] = 0;
+                _symbolicLb[j * _size + i] = 0;
+            }
+
+            _symbolicUpperBias[i] = sourceValues[0] * sourceValues[1];
+            _symbolicLowerBias[i] = sourceValues[0] * sourceValues[1];
+            continue;
+        }
+
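+        // The relaxation below is a McCormick envelope: for x in [lx, ux]
+        // and y in [ly, uy], x * y >= ly * x + lx * y - lx * ly and
+        // x * y <= uy * x + lx * y - lx * uy. For example (hypothetical
+        // bounds): x, y in [-1, 1] gives -x - y - 1 <= x * y <= x - y + 1.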
+        for ( unsigned j = 0; j < _inputLayerSize; ++j )
+        {
+            _symbolicUb[j * _size + i] = 0;
+            _symbolicLb[j * _size + i] = 0;
+        }
+
+        // Symbolic lower bound:
+        // out >= alpha * x + beta * y + gamma
+        // where alpha = lb_y, beta = lb_x, gamma = -lb_x * lb_y
+
+        // Symbolic upper bound:
+        // out <= alpha * x + beta * y + gamma
+        // where alpha = ub_y, beta = lb_x, gamma = -lb_x * ub_y
+        for ( unsigned j = 0; j < _inputLayerSize; ++j )
+        {
+            if ( sourceLbs[1] >= 0 )
+            {
+                _symbolicLb[j * _size + i] +=
+                    sourceLbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA];
+            }
+            else
+            {
+                _symbolicLb[j * _size + i] +=
+                    sourceLbs[1] * sourceSymbolicUb[j * sourceLayerSize + indexA];
+            }
+
+            if ( sourceUbs[1] >= 0 )
+            {
+                _symbolicUb[j * _size + i] +=
+                    sourceUbs[1] * sourceSymbolicUb[j * sourceLayerSize + indexA];
+            }
+            else
+            {
+                _symbolicUb[j * _size + i] +=
+                    sourceUbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA];
+            }
+
+            if ( sourceLbs[0] >= 0 )
+            {
+                _symbolicLb[j * _size + i] +=
+                    sourceLbs[0] * sourceSymbolicLb[j * sourceLayerSize + indexB];
+                _symbolicUb[j * _size + i] +=
+                    sourceLbs[0] * sourceSymbolicUb[j * sourceLayerSize + indexB];
+            }
+            else
+            {
+                _symbolicLb[j * _size + i] +=
+                    sourceLbs[0] * sourceSymbolicUb[j * sourceLayerSize + indexB];
+                _symbolicUb[j * _size + i] +=
+                    sourceLbs[0] * sourceSymbolicLb[j * sourceLayerSize + indexB];
+            }
+        }
+        _symbolicLowerBias[i] = -sourceLbs[0] * sourceLbs[1];
+        _symbolicUpperBias[i] = -sourceLbs[0] * sourceUbs[1];
+
+        double lb = FloatUtils::infinity();
+        double ub = FloatUtils::negativeInfinity();
+        List<double> values = { sourceLbs[0] * sourceLbs[1],
+                                sourceLbs[0] * sourceUbs[1],
+                                sourceUbs[0] * sourceLbs[1],
+                                sourceUbs[0] * sourceUbs[1] };
+        for ( const auto &v : values )
+        {
+            if ( v < lb )
+                lb = v;
+            if ( v > ub )
+                ub = v;
+        }
+
+        /*
+          We now have the symbolic representation for the current
+          layer. Next, we compute new lower and upper bounds for
+          it. For each of these bounds, we compute an upper bound and
+          a lower bound.
+        */
+        _symbolicLbOfLb[i] = _symbolicLowerBias[i];
+        _symbolicUbOfLb[i] = _symbolicLowerBias[i];
+        _symbolicLbOfUb[i] = _symbolicUpperBias[i];
+        _symbolicUbOfUb[i] = _symbolicUpperBias[i];
+
+        for ( unsigned j = 0; j < _inputLayerSize; ++j )
+        {
+            double inputLb = _layerOwner->getLayer( 0 )->getLb( j );
+            double inputUb = _layerOwner->getLayer( 0 )->getUb( j );
+
+            double entry = _symbolicLb[j * _size + i];
+
+            if ( entry >= 0 )
+            {
+                _symbolicLbOfLb[i] += ( entry * inputLb );
+                _symbolicUbOfLb[i] += ( entry * inputUb );
+            }
+            else
+            {
+                _symbolicLbOfLb[i] += ( entry * inputUb );
+                _symbolicUbOfLb[i] += ( entry * inputLb );
+            }
+
+            entry = _symbolicUb[j * _size + i];
+
+            if ( entry >= 0 )
+            {
+                _symbolicLbOfUb[i] += ( entry * inputLb );
+                _symbolicUbOfUb[i] += ( entry * inputUb );
+            }
+            else
+            {
+                _symbolicLbOfUb[i] += ( entry * inputUb );
+                _symbolicUbOfUb[i] += ( entry * inputLb );
+            }
+        }
+
+        // Clamp against the interval-arithmetic bounds of the product
+        if ( _symbolicLbOfLb[i] < lb )
+            _symbolicLbOfLb[i] = lb;
+        if ( _symbolicUbOfUb[i] > ub )
+            _symbolicUbOfUb[i] = ub;
+
+        /*
+          We now have the tightest bounds we can for the Bilinear
+          variable. If they are tighter than what was previously
+          known, store them.
+        */
+        if ( _lb[i] < _symbolicLbOfLb[i] )
+        {
+            _lb[i] = _symbolicLbOfLb[i];
+            _layerOwner->receiveTighterBound(
+                Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
+        }
+
+        if ( _ub[i] > _symbolicUbOfUb[i] )
+        {
+            _ub[i] = _symbolicUbOfUb[i];
+            _layerOwner->receiveTighterBound(
+                Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
+        }
+    }
+}
+
 void Layer::computeSymbolicBoundsForWeightedSum()
 {
     std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 );
diff --git a/src/nlr/Layer.h b/src/nlr/Layer.h
index ceb16b3d68..4d1990ebc5 100644
--- a/src/nlr/Layer.h
+++ b/src/nlr/Layer.h
@@ -2,7 +2,7 @@
 /*! \file Layer.h
  ** \verbatim
  ** Top contributors (to current version):
- ** Guy Katz
+ ** Guy Katz, Ido Shmuel
  ** This file is part of the Marabou project.
 ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS
 ** in the top-level source directory) and their institutional affiliations.
@@ -275,6 +275,22 @@ class Layer
     double softmaxLinearUpperBound( const Vector<double> &inputLbs,
                                     const Vector<double> &inputUbs,
                                     unsigned i );
+
+    /*
+      Helper functions for symbolic bound tightening
+    */
+    void computeSymbolicBoundsForInput();
+    void computeSymbolicBoundsForRelu();
+    void computeSymbolicBoundsForSign();
+    void computeSymbolicBoundsForAbsoluteValue();
+    void computeSymbolicBoundsForWeightedSum();
+    void computeSymbolicBoundsForMax();
+    void computeSymbolicBoundsForLeakyRelu();
+    void computeSymbolicBoundsForSigmoid();
+    void computeSymbolicBoundsForRound();
+    void computeSymbolicBoundsForSoftmax();
+    void computeSymbolicBoundsForBilinear();
+    void computeSymbolicBoundsDefault();
 
     /*
       Helper functions for interval bound tightening

From 85fe8f3d7ca687b578c42f68255dfa7cfa13bea9 Mon Sep 17 00:00:00 2001
From: ido-shm-uel
Date: Tue, 1 Oct 2024 23:18:01 +0300
Subject: [PATCH 09/81] 0-4-1: Adding symbolic bound tightening tests for all
 activation functions.

0-4-1: Adding symbolic bound tightening tests for all activation
functions.
---
 src/nlr/tests/Test_NetworkLevelReasoner.h | 6622 +++++++++++++--------
 1 file changed, 4267 insertions(+), 2355 deletions(-)

diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h
index 33554ba680..42de1e97fa 100644
--- a/src/nlr/tests/Test_NetworkLevelReasoner.h
+++ b/src/nlr/tests/Test_NetworkLevelReasoner.h
@@ -2,7 +2,7 @@
 /*! \file Test_NetworkLevelReasoner.h
  ** \verbatim
  ** Top contributors (to current version):
- ** Guy Katz, Andrew Wu
+ ** Guy Katz, Andrew Wu, Ido Shmuel
 ** This file is part of the Marabou project.
 ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS
 ** in the top-level source directory) and their institutional affiliations.
@@ -17,6 +17,7 @@ #include "FloatUtils.h" #include "Layer.h" #include "NetworkLevelReasoner.h" +#include "DeepPolySoftmaxElement.h" #include "Options.h" #include "Query.h" #include "Tightening.h" @@ -991,7 +992,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); } - void populateNetworkWithReluAndBilinear( NLR::NetworkLevelReasoner &nlr ) { /* @@ -1059,1743 +1059,1769 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); } - - void test_evaluate_relu() + + void populateNetworkSBTRelu( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; - - populateNetwork( nlr ); + /* + 2 R 1 + x0 --- x2 ---> x4 --- x6 + \ / / + 1 \ / / + \/ -1 / + /\ / + 3 / \ / + / \ R / + x1 --- x3 ---> x5 + 1 + */ - double input[2]; - double output[2]; + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - // With ReLUs, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); - // With ReLUs, case 1 - input[0] = 1; - input[1] = 1; + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 1 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - // With ReLUs, case 2 - input[0] = 1; - input[1] = 2; + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; - TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 0 ) ); + tableau.getBoundManager().initialize( 7 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); } - - void test_evaluate_sigmoids() + + void populateNetworkSBTReluResidual1( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; + /* + -1 + __________________ + / \ + / 1 R -1 1 R 3 1 + x0 --- x1 ---> x2 --- x3 ---> x4 --- x5 + \ / + \ 3 / + \________________________/ - populateNetworkWithSigmoids( nlr ); + */ - double input[2]; - double output[2]; + // 
Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 1 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 2, NLR::Layer::RELU, 1 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 4, NLR::Layer::RELU, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - // case 1 - input[0] = 0; - input[1] = 0; + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + nlr.addLayerDependency( 0, 3 ); + nlr.addLayerDependency( 1, 5 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 4, 0, 5, 0, 3 ); + nlr.setWeight( 0, 0, 3, 0, -1 ); + nlr.setWeight( 1, 0, 5, 0, 3 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.6750, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 3.0167, 0.0001 ) ); - // case 2 - input[0] = 1; - input[1] = 1; + nlr.setBias( 3, 0, 1 ); + nlr.setBias( 5, 0, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.6032, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.5790, 0.0001 ) ); + nlr.addActivationSource( 3, 0, 4, 0 ); - // case 3 - input[0] = 1; - input[1] = 2; + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 5 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; - TS_ASSERT( FloatUtils::areEqual( output[0], 0.5045, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.1957, 0.0001 ) ); + tableau.getBoundManager().initialize( 6 ); + tableau.setLowerBound( 1, -large ); + tableau.setUpperBound( 1, large ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); } - void test_evaluate_abs() + void populateNetworkSBTReluResidual2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithAbs( nlr ); + /* + -1 + __________________ + / \ + / 1 R -1 1 R 3 1 1 + x0 --- x1 ---> x2 --- x3 ---> x4 --- x5 --- x6 + \ / + \ 1 / + \_______________________________/ - double input[2]; - double output[2]; + */ - // With Abs, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 1 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 2, NLR::Layer::RELU, 1 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 4, NLR::Layer::RELU, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 6, NLR::Layer::WEIGHTED_SUM, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Mark layer dependencies + for ( unsigned i = 1; i <= 6; ++i ) + nlr.addLayerDependency( i - 1, i ); + nlr.addLayerDependency( 0, 3 ); + nlr.addLayerDependency( 0, 5 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( 
FloatUtils::areEqual( output[1], 4 ) ); + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 4, 0, 5, 0, 3 ); + nlr.setWeight( 0, 0, 3, 0, -1 ); + nlr.setWeight( 0, 0, 5, 0, 1 ); + nlr.setWeight( 5, 0, 6, 0, 1 ); - // With Abs, case 1 - input[0] = -2; - input[1] = -2; + nlr.setBias( 3, 0, 1 ); + nlr.setBias( 5, 0, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + nlr.addActivationSource( 3, 0, 4, 0 ); - // With Abs, case 2 - input[0] = 1; - input[1] = 2; + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 6 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; - TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 10 ) ); + tableau.getBoundManager().initialize( 7 ); + tableau.setLowerBound( 1, -large ); + tableau.setUpperBound( 1, large ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); } - void test_evaluate_sign() + void populateNetworkSBTReluReindex( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; + /* - populateNetworkWithSign( nlr ); + 1 1 1 1 + x0 --- x2 x5 --- x6 x9 --- x10 + \ /\ /\ / \ / \ / + 1 \ / R\ /-1\ / R \ / 1 \ / + \/ \/ \/ \/ \/ + /\ /\ /\ /\ /\ + 1 / \ R/ \ 1/ \ R / \ 1 / \ + / \/ \/ \ / \ / 0 \ + x1 --- x3 x4 --- x7 x8 --- x11 + -1 1 + + The example described in Fig. 
3 of + https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + */ - double input[2]; - double output[2]; + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - // With Sign, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); - // With Sign, case 1 - input[0] = -2; - input[1] = -2; + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 0 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setBias( 5, 0, 1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 0 ); - // With Sign, case 2 (0 considered "non-negative", sign(0)=1) - input[0] = -1; - input[1] = 1; + nlr.addActivationSource( 3, 0, 4, 1 ); + nlr.addActivationSource( 3, 1, 4, 0 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], -1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -4 ) ); - } - - void test_evaluate_round() - { - NLR::NetworkLevelReasoner nlr; + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - populateNetworkWithRound( nlr ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - double input[2]; - double output[2]; + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - // With Round, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 8 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; - // With Round, case 1 - input[0] = 2.1; - input[1] = 1.4; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 2, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); - - // With Round, case 2 - input[0] = 2.1; - input[1] = 1.6; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0, 0.0001 ) ); - 
TS_ASSERT( FloatUtils::areEqual( output[1], -12, 0.0001 ) ); + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); } - void test_evaluate_leaky_relu() + void populateNetworkSBTLeakyReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithLeakyRelu( nlr ); - - double input[2]; - double output[2]; - - // With Leaky ReLU, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + /* - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + 1 LR 1 LR 1 1 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 1 \ / 0 \ / + \/ \/ \/ + /\ /\ /\ + 1 / \ 1 / \ 1 / \ + / \ LR / \ LR / 1 \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + -1 -1 + + The example described in Fig. 3 of + https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + using LeakyReLU activation instead of ReLU + */ - // With Leaky ReLU, case 1 (alpha=0.1) - input[0] = 1; - input[1] = 1; + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.getLayer( 2 )->setAlpha( 0.2 ); + nlr.getLayer( 4 )->setAlpha( 0.2 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.9, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 0.57, 0.0001 ) ); + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); - // With Leaky ReLU, case 2 - input[0] = 1; - input[1] = 2; + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, 1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], -0.04, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -0.76, 0.0001 ) ); - } - - void test_evaluate_max() - { - NLR::NetworkLevelReasoner nlr; + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 0 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); - populateNetworkWithMax( nlr ); + nlr.setBias( 5, 0, 1 ); - double input[2]; - double output[1]; + // Mark the LeakyReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); - // With Max, Inputs are zeros, only biases count - input[0] = 0; - 
input[1] = 0; + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], -3 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - // With Max, case 1 - input[0] = 1; - input[1] = -3; + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - TS_ASSERT( FloatUtils::areEqual( output[0], -18 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - // With Max, case 2 - input[0] = -3; - input[1] = 3; + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; - TS_ASSERT( FloatUtils::areEqual( output[0], -5 ) ); + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); } - - void test_evaluate_softmax() + void populateNetworkSBTSigmoidsAndRound( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSoftmax( nlr ); + /* - double input[2]; - double output[2]; + 1 S 1 Rd + x0 --- x2 ---> x4 --- x6 --- x8 + \ / \ / + 1 \ / 1 \ / + \/ \/ + /\ /\ + 1 / \ 1 / \ + / \ S / \ Rd + x1 --- x3 ---> x5 --- x7 --- x9 + -1 -1 - // With Softmax, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + */ - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.2999, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.4001, 0.0001 ) ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); - // With Softmax, case 1 - input[0] = 1; - input[1] = -3; + // Mark layer dependencies + for ( unsigned i = 1; i <= 4; ++i ) + nlr.addLayerDependency( i - 1, i ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.1192, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.7615, 0.0001 ) ); + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); 
+ nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); - // With Softmax, case 2 - input[0] = -3; - input[1] = 3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.1206, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.7588, 0.0001 ) ); - } - - void test_evaluate_bilinear() - { - NLR::NetworkLevelReasoner nlr; + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, 1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); - populateNetworkWithBilinear( nlr ); + // Mark the Sigmoid sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); - double input[2]; - double output[1]; + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - // With Bilinear, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - // With Bilinear, case 1 - input[0] = 0.1; - input[1] = -0.3; + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 2.8304, 0.0001 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - // With Bilinear, case 2 - input[0] = -0.3; - input[1] = 0.3; + // Very loose bounds for neurons except inputs + double large = 1000000; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.0912, 0.0001 ) ); + tableau.getBoundManager().initialize( 10 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); } - void test_evaluate_non_consecutive_layers() + void populateNetworkSBTMax( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; + /* + + 1 R Max 2 + x0 --- x2 ---> x4 --- x6 ---> x7 + \ / / + 1 \ / / + \/ / + /\ / + 1 / \ / + / \ R / + x1 --- x3 ---> x5 + -1 + + */ // Create the layers nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::MAX, 1 ); + nlr.addLayer( 4, NLR::Layer::WEIGHTED_SUM, 1 ); // Mark layer dependencies - nlr.addLayerDependency( 0, 1 ); - 
nlr.addLayerDependency( 1, 2 ); - nlr.addLayerDependency( 2, 3 ); - nlr.addLayerDependency( 0, 3 ); - nlr.addLayerDependency( 3, 4 ); - nlr.addLayerDependency( 0, 4 ); - nlr.addLayerDependency( 4, 5 ); + for ( unsigned i = 1; i <= 4; ++i ) + nlr.addLayerDependency( i - 1, i ); - // Set the weights and relus + // Set the weights and biases for the weighted sum layers nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); + nlr.setWeight( 3, 0, 4, 0, 2 ); + // Mark the ReLU sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, 2 ); - nlr.setWeight( 2, 2, 3, 1, -2 ); - nlr.setWeight( 0, 1, 3, 1, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 0, 0, 4, 2 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 2, 5, 0, 1 ); - - // Evaluate - double input[2]; - double output; - - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); - TS_ASSERT( FloatUtils::areEqual( output, 2 ) ); - - input[0] = -1; - input[1] = 2; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); - TS_ASSERT( FloatUtils::areEqual( output, 0 ) ); + // Mark the Max sources + nlr.addActivationSource( 2, 0, 3, 0 ); + nlr.addActivationSource( 2, 1, 3, 0 ); - TS_ASSERT_THROWS_NOTHING( nlr.computeSuccessorLayers() ); + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 7 ); + // Very loose bounds for neurons except inputs + double large = 1000000; - TS_ASSERT_EQUALS( nlr.getLayer( 0 )->getSuccessorLayers(), Set( { 1, 3, 4 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 1 )->getSuccessorLayers(), Set( { 2 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 2 )->getSuccessorLayers(), Set( { 3 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 3 )->getSuccessorLayers(), Set( { 4 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getSuccessorLayers(), Set( { 5 } ) ); + tableau.getBoundManager().initialize( 8 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); } - - void test_evaluate_abs_and_relu() + + void populateNetworkSBTSoftmax( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; + /* - populateNetworkWithAbsAndRelu( nlr ); - - double input[2]; - double output[2]; - input[0] = 1; - input[1] = 1; + x0 x3 S x6 - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + x1 x4 S x7 - TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); - TS_ASSERT( 
FloatUtils::areEqual( output[1], 2 ) ); + x2 x5 S x8 - input[0] = 1; - input[1] = 2; + x3 = x0 - x1 + x2 + 1 + x4 = -x0 + x1 + x2 + 2 + x5 = -x0 - x1 - x2 + 3 - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + x6 x7 x8 = softmax(x3, x4, x5) - TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - } - - void test_evaluate_round_and_sign() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithRoundAndSign( nlr ); - - double input[2]; - double output[2]; + x9 = x6 + x7 + x8 + x10 = -x6 - x7 - x8 - input[0] = 1.6; - input[1] = 1.4; + */ - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -2, 0.0001 ) ); + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); - input[0] = 1.6; - input[1] = 1.6; + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, -1 ); + nlr.setWeight( 0, 0, 1, 2, -1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 2, -1 ); + nlr.setWeight( 0, 2, 1, 0, 1 ); + nlr.setWeight( 0, 2, 1, 1, 1 ); + nlr.setWeight( 0, 2, 1, 2, -1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 1, 1, 2 ); + nlr.setBias( 1, 2, 3 ); - TS_ASSERT( FloatUtils::areEqual( output[0], -1, 0.0001) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 2 ); - } - - void test_evaluate_leaky_relu_and_sigmoid() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithLeakyReluAndSigmoid( nlr ); - - double input[2]; - double output[2]; + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - input[0] = 1; - input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.7109, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 1.4602, 0.0001 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 8 ); - input[0] = 1; - input[1] = 2; + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.4013, 0.0001 )
); - TS_ASSERT( FloatUtils::areEqual( output[1], 0.6508, 0.0001 ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 11 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); } - void test_evaluate_softmax_and_max() + void populateNetworkSBTSoftmax2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSoftmaxAndMax( nlr ); - - double input[2]; - double output[1]; + /* - input[0] = 1; - input[1] = -3; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + x0 x3 S x8 - TS_ASSERT( FloatUtils::areEqual( output[0], -2.9998, 0.0001 ) ); + x1 x4 S x9 - input[0] = -3; - input[1] = 3; + x2 x5 S x10 - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + x6 S x11 - TS_ASSERT( FloatUtils::areEqual( output[0], -1.0000, 0.0001 ) ); - } - - void test_evaluate_relu_and_bilinear() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithReluAndBilinear( nlr ); - - double input[2]; - double output[1]; + x7 S x12 - input[0] = 1; - input[1] = 1; + x3 = x0 - x1 + x2 + 1 + x4 = -x0 + x1 + x2 + 2 + x5 = -x0 - x1 - x2 + 3 + x6 = -x0 - x1 - x2 + 2 + x7 = -x0 - x1 - x2 + 1 - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + x8 x10 x12 = softmax(x3, x5, x7) - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + x9 x11 = softmax(x4, x6) - input[0] = 1; - input[1] = 2; + x13 = x8 + x10 + x12 + x14 = -x8 - x10 - x12 + x15 = x9 + x11 + x16 = -x9 - x11 - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + */ - TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); - } + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 5 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 5 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 4 ); - void test_store_into_other() - { - NLR::NetworkLevelReasoner nlr; + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); - populateNetwork( nlr ); + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, -1 ); + nlr.setWeight( 0, 0, 1, 2, -1 ); + nlr.setWeight( 0, 0, 1, 3, -1 ); + nlr.setWeight( 0, 0, 1, 4, -1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 2, -1 ); + nlr.setWeight( 0, 1, 1, 3, -1 ); + nlr.setWeight( 0, 1, 1, 4, -1 ); + nlr.setWeight( 0, 2, 1, 0, 1 ); + nlr.setWeight( 0, 2, 1, 1, 1 ); + nlr.setWeight( 0, 2, 1, 2, -1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + nlr.setWeight( 0, 2, 1, 4, -1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 4, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + nlr.setWeight( 2, 4, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 2, 1 ); + nlr.setWeight( 2, 3, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 3, -1 ); + nlr.setWeight( 2, 3, 3, 3, -1 ); - NLR::NetworkLevelReasoner nlr2; + 
nlr.setBias( 1, 0, 1 ); + nlr.setBias( 1, 1, 2 ); + nlr.setBias( 1, 2, 3 ); + nlr.setBias( 1, 3, 2 ); + nlr.setBias( 1, 4, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 4, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 4, 2, 2 ); + nlr.addActivationSource( 1, 0, 2, 4 ); + nlr.addActivationSource( 1, 2, 2, 4 ); + nlr.addActivationSource( 1, 4, 2, 4 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 3, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 3 ); + nlr.addActivationSource( 1, 3, 2, 3 ); - double input[2]; - double output1[2]; - double output2[2]; + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - // Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 4 ), 7 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 4 ), 12 ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 13 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 3 ), 16 ); - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; - // With ReLUs, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + tableau.getBoundManager().initialize( 17 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + } + + void 
populateNetworkSBTBilinear( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - // With ReLUs, case 1 - input[0] = 1; - input[1] = 1; + x0 x2 + x x4 -- x5 + x1 x3 - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + x2 = x0 - 2 * x1 + x3 = x0 + x1 + x5 = -x4 - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } + x4 = x2 * x3 + */ - void test_store_into_other_with_sigmoids() - { - NLR::NetworkLevelReasoner nlr; + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::BILINEAR, 1 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - populateNetworkWithSigmoids( nlr ); + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); - NLR::NetworkLevelReasoner nlr2; + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -2 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, -1 ); - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); - double input[2]; - double output1[2]; - double output2[2]; - // case 1 - input[0] = 0; - input[1] = 0; + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - // case 2 - input[0] = 1; - input[1] = 1; + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 5 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + tableau.getBoundManager().initialize( 6 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); } - - void test_store_into_other_with_round() + + void test_evaluate_relu() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithRound( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + populateNetwork( nlr ); double input[2]; - double output1[2]; - double output2[2]; + double output[2]; - // case 1 + // With ReLUs, Inputs are zeros, only biases count input[0] = 0; input[1] = 0; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual(
output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - // case 2 + // With ReLUs, case 1 input[0] = 1; input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 1 ) ); + + // With ReLUs, case 2 + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 0 ) ); } - - void test_store_into_other_with_sign() + + void test_evaluate_sigmoids() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithSign( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + populateNetworkWithSigmoids( nlr ); double input[2]; - double output1[2]; - double output2[2]; + double output[2]; // case 1 input[0] = 0; input[1] = 0; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.6750, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 3.0167, 0.0001 ) ); // case 2 input[0] = 1; input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.6032, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.5790, 0.0001 ) ); + + // case 3 + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.5045, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.1957, 0.0001 ) ); } - void test_store_into_other_with_abs() + void test_evaluate_abs() { NLR::NetworkLevelReasoner nlr; populateNetworkWithAbs( nlr ); - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - double input[2]; - double output1[2]; - double output2[2]; + double output[2]; - // case 1 + // With Abs, Inputs are zeros, only biases count input[0] = 0; input[1] = 0; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - // case 2 + // With Abs, case 1 + input[0] = -2; + input[1] = -2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Abs, case 2 
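+ // Worked by hand from the weights in populateNetworkWithAbs: (x0, x1) = (1, 2) gives a first weighted-sum layer of ( x0 + 1, 2x0 - 3x1, x1 ) = ( 2, -4, 2 ), absolute values ( 2, 4, 2 ); the second weighted sums are ( 2 + 4 - 2, -2 + 4 - 2 + 2 ) = ( 4, 2 ), so the outputs below are 4 and 4 + 3 * 2 = 10.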
input[0] = 1; - input[1] = 1; + input[1] = 2; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 10 ) ); } - void test_store_into_other_with_leaky_relu() + void test_evaluate_sign() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithLeakyRelu( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + populateNetworkWithSign( nlr ); double input[2]; - double output1[2]; - double output2[2]; + double output[2]; - // case 1 + // With Sign, Inputs are zeros, only biases count input[0] = 0; input[1] = 0; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - // case 2 - input[0] = 1; + // With Sign, case 1 + input[0] = -2; + input[1] = -2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Sign, case 2 (0 considered "non-negative", sign(0)=1) + input[0] = -1; input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], -1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -4 ) ); } - void test_store_into_other_with_max() + void test_evaluate_round() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithMax( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + populateNetworkWithRound( nlr ); double input[2]; - double output1[2]; - double output2[2]; + double output[2]; - // case 1 + // With Round, Inputs are zeros, only biases count input[0] = 0; input[1] = 0; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - // case 2 - input[0] = 1; - input[1] = 1; + // With Round, case 1 + input[0] = 2.1; + input[1] = 1.4; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 2, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); + + // With Round, case 2 + input[0] = 2.1; + 
input[1] = 1.6; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -12, 0.0001 ) ); } - void test_store_into_other_with_softmax() + void test_evaluate_leaky_relu() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithSoftmax( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + populateNetworkWithLeakyRelu( nlr ); double input[2]; - double output1[2]; - double output2[2]; + double output[2]; - // case 1 + // With Leaky ReLU, Inputs are zeros, only biases count input[0] = 0; input[1] = 0; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - // case 2 + // With Leaky ReLU, case 1 (alpha=0.1) input[0] = 1; input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.9, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 0.57, 0.0001 ) ); + + // With Leaky ReLU, case 2 + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -0.04, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -0.76, 0.0001 ) ); } - void test_store_into_other_with_bilinear() + void test_evaluate_max() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithBilinear( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + populateNetworkWithMax( nlr ); double input[2]; - double output1[2]; - double output2[2]; + double output[1]; - // case 1 + // With Max, Inputs are zeros, only biases count input[0] = 0; input[1] = 0; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], -3 ) ); - // case 2 + // With Max, case 1 input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + input[1] = -3; - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - void populateNetworkSBT( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - 2 R 1 - x0 --- x2 ---> x4 --- x6 - \ / / - 1 \ / / - \/ -1 / - /\ / - 3 / \ / - / \ R / - x1 --- x3 ---> x5 - 1 - */ + TS_ASSERT( FloatUtils::areEqual( output[0], -18 ) ); - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::RELU, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + // With 
Max, case 2 + input[0] = -3; + input[1] = 3; - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); + TS_ASSERT( FloatUtils::areEqual( output[0], -5 ) ); + } + + + void test_evaluate_softmax() + { + NLR::NetworkLevelReasoner nlr; - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); + populateNetworkWithSoftmax( nlr ); - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + double input[2]; + double output[2]; - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + // With Softmax, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.2999, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.4001, 0.0001 ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + // With Softmax, case 1 + input[0] = 1; + input[1] = -3; - // Very loose bounds for neurons except inputs - double large = 1000000; + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.7615, 0.0001 ) ); - tableau.getBoundManager().initialize( 7 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); + // With Softmax, case 2 + input[0] = -3; + input[1] = 3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.1206, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.7588, 0.0001 ) ); } - - void test_sbt_relus_all_active() + + void test_evaluate_bilinear() { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBT( nlr, tableau ); - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); + populateNetworkWithBilinear( nlr ); - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + double input[2]; + double output[1]; - /* - Input ranges: + // With Bilinear, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; - x0: [4, 6] - x1: [1, 5] + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); - Layer 1: + // With Bilinear, case 1 + input[0] = 0.1; + input[1] = -0.3; - x2.lb = 2x0 + 3x1 : [11, 27] - x2.ub = 2x0 + 3x1 : [11, 27] + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( 
output[0], 2.8304, 0.0001 ) ); - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] + // With Bilinear, case 2 + input[0] = -0.3; + input[1] = 0.3; - Both ReLUs active, bound survive through activations: + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.0912, 0.0001 ) ); + } + + void test_evaluate_non_consecutive_layers() + { + NLR::NetworkLevelReasoner nlr; - x4.lb = 2x0 + 3x1 : [11, 27] - x4.ub = 2x0 + 3x1 : [11, 27] + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] + // Mark layer dependencies + nlr.addLayerDependency( 0, 1 ); + nlr.addLayerDependency( 1, 2 ); + nlr.addLayerDependency( 2, 3 ); + nlr.addLayerDependency( 0, 3 ); + nlr.addLayerDependency( 3, 4 ); + nlr.addLayerDependency( 0, 4 ); + nlr.addLayerDependency( 4, 5 ); - Layer 2: + // Set the weights and relus + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); - x6.lb = x0 + 2x1 : [6, 16] - x6.ub = x0 + 2x1 : [6, 16] - */ - - List expectedBounds( { - Tightening( 2, 11, Tightening::LB ), - Tightening( 2, 27, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); - Tightening( 4, 11, Tightening::LB ), - Tightening( 4, 27, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, 2 ); + nlr.setWeight( 2, 2, 3, 1, -2 ); + nlr.setWeight( 0, 1, 3, 1, 1 ); - Tightening( 6, 6, Tightening::LB ), - Tightening( 6, 16, Tightening::UB ), - } ); + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 0, 0, 4, 2 ); - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 2, 5, 0, 1 ); - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : expectedBounds ) - TS_ASSERT( bounds.exists( bound ) ); - } + // Evaluate + double input[2]; + double output; - void test_sbt_relus_active_and_inactive() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + input[0] = 1; + input[1] = 1; - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBT( nlr, tableau ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); + TS_ASSERT( FloatUtils::areEqual( output, 2 ) ); - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); + input[0] = -1; + input[1] = 2; - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -30 ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); + TS_ASSERT( FloatUtils::areEqual( output, 0 ) ); - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + TS_ASSERT_THROWS_NOTHING( nlr.computeSuccessorLayers() ); - /* - Input ranges: + TS_ASSERT_EQUALS( nlr.getLayer( 0 
)->getSuccessorLayers(), Set( { 1, 3, 4 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 1 )->getSuccessorLayers(), Set( { 2 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 2 )->getSuccessorLayers(), Set( { 3 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 3 )->getSuccessorLayers(), Set( { 4 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getSuccessorLayers(), Set( { 5 } ) ); + } - x0: [4, 6] - x1: [1, 5] + void test_evaluate_abs_and_relu() + { + NLR::NetworkLevelReasoner nlr; - Layer 1: + populateNetworkWithAbsAndRelu( nlr ); + + double input[2]; + double output[2]; - x2.lb = 2x0 + 3x1 - 30 : [-19, -3] - x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + input[0] = 1; + input[1] = 1; - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - First ReLU is inactive, bounds get zeroed - Second ReLU is active, bounds surive the activation + TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); - x4.lb = 0 - x4.ub = 0 + input[0] = 1; + input[1] = 2; - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - Layer 2: + TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + } + + void test_evaluate_round_and_sign() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithRoundAndSign( nlr ); + + double input[2]; + double output[2]; - x6.lb = - x0 - x1 : [-11, -5] - x6.ub = - x0 - x1 : [-11, -5] - */ + input[0] = 1.6; + input[1] = 1.4; - List expectedBounds( { - Tightening( 2, -19, Tightening::LB ), - Tightening( 2, -3, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 0, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), + TS_ASSERT( FloatUtils::areEqual( output[0], 1, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -2, 0.0001 ) ); - Tightening( 6, -11, Tightening::LB ), - Tightening( 6, -5, Tightening::UB ), - } ); + input[0] = 1.6; + input[1] = 1.6; - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], -1, 0.0001) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); } - - void test_sbt_relus_active_and_not_fixed() + + void test_evaluate_leaky_relu_and_sigmoid() { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBT( nlr, tableau ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); + + populateNetworkWithLeakyReluAndSigmoid( nlr ); + + double input[2]; + double output[2]; - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -15 ); + input[0] = 1; + input[1] = 1; - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - /* - Input ranges: + TS_ASSERT( FloatUtils::areEqual( output[0], 0.7109, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( 
output[1], 1.4602, 0.0001 ) ); - x0: [4, 6] - x1: [1, 5] + input[0] = 1; + input[1] = 2; - Layer 1: + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + TS_ASSERT( FloatUtils::areEqual( output[0], 0.4013, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 0.6508, 0.0001 ) ); + } + + void test_evaluate_softmax_and_max() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSoftmaxAndMax( nlr ); + + double input[2]; + double output[1]; - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] + input[0] = 1; + input[1] = -3; - First ReLU is undecided, bound is concretized. - Coefficient: 12/(12--4) = 12/16 = 0.75 - Second ReLU is active, bounds surive the activation + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - x4 range: [0, 12] - x4.lb = 0.75( 2x0 + 3x1 ) - 0.75 * 15 = 1.5x0 + 2.25x1 - 11.25 - x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25 + TS_ASSERT( FloatUtils::areEqual( output[0], -2.9998, 0.0001 ) ); - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] + input[0] = -3; + input[1] = 3; - Layer 2: + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - x6.lb = 0.5x0 + 1.25x1 - 11.25 - x6.ub = 0.5x0 + 1.25x1 - 8.25 + TS_ASSERT( FloatUtils::areEqual( output[0], -1.0000, 0.0001 ) ); + } + + void test_evaluate_relu_and_bilinear() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithReluAndBilinear( nlr ); + + double input[2]; + double output[1]; - x6 range: [2 + 1.25 - 11.25 = -8, 3 + 6.25 - 8.25 = 1] = [-8, 1] - */ + input[0] = 1; + input[1] = 1; - List expectedBounds( { - Tightening( 2, -4, Tightening::LB ), - Tightening( 2, 12, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 12, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - Tightening( 6, -8, Tightening::LB ), - Tightening( 6, 1, Tightening::UB ), - } ); + input[0] = 1; + input[1] = 2; - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); } - void test_sbt_relus_active_and_externally_fixed() + void test_store_into_other() { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBT( nlr, tableau ); - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); + populateNetwork( nlr ); - // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
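+ // storeIntoOther is expected to copy the network's topology, weights, biases and activation sources into nlr2, so the original and the copy should evaluate identically on every input checked below.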
- nlr.setBias( 1, 0, -15 ); + NLR::NetworkLevelReasoner nlr2; - // However, one of the ReLU's variables has been eliminated - nlr.eliminateVariable( 2, -3 ); + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + double input[2]; + double output1[2]; + double output2[2]; - /* - Input ranges: + // Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; - x0: [4, 6] - x1: [1, 5] + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - Layer 1: + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] + // With ReLUs, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; - First ReLU is inactive (set externally), bounds get zeroed - Second ReLU is active, bounds surive the activation + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - x4.lb = 0 - x4.ub = 0 + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] + // With ReLUs, case 1 + input[0] = 1; + input[1] = 1; - Layer 2: + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - x6.lb = - x0 - x1 : [-11, -5] - x6.ub = - x0 - x1 : [-11, -5] - */ + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } - List expectedBounds( { - // x2 does not appear, because it has been eliminated + void test_store_into_other_with_sigmoids() + { + NLR::NetworkLevelReasoner nlr; - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), + populateNetworkWithSigmoids( nlr ); - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 0, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), + NLR::NetworkLevelReasoner nlr2; - Tightening( 6, -11, Tightening::LB ), - Tightening( 6, -5, Tightening::UB ), - } ); + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + double input[2]; + double output1[2]; + double output2[2]; - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); - } + // case 1 + input[0] = 0; + input[1] = 0; - void test_sbt_abs_all_positive() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, 
NLR::Layer::WEIGHTED_SUM, 1 ); + // case 2 + input[0] = 1; + input[1] = 1; - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_round() + { + NLR::NetworkLevelReasoner nlr; - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); + populateNetworkWithRound( nlr ); - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + NLR::NetworkLevelReasoner nlr2; - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + double input[2]; + double output1[2]; + double output2[2]; - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + // case 1 + input[0] = 0; + input[1] = 0; - // Very loose bounds for neurons except inputs - double large = 1000000; + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); + // case 2 + input[0] = 1; + input[1] = 1; - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - /* - Input ranges: + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_sign() + { + NLR::NetworkLevelReasoner nlr; - x0: [4, 6] - x1: [1, 5] + populateNetworkWithSign( nlr ); - Layer 1: + NLR::NetworkLevelReasoner nlr2; - x2.lb = 2x0 + 3x1 : [11, 27] - x2.ub = 2x0 + 3x1 : [11, 27] + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] + double input[2]; + double output1[2]; + double output2[2]; - Both absolute values positive, bound survive through activations: + // case 1 + input[0] = 0; + input[1] = 0; - x4.lb = 2x0 + 3x1 : [11, 27] - x4.ub = 2x0 + 3x1 : [11, 27] + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] 
+ TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - Layer 2: + // case 2 + input[0] = 1; + input[1] = 1; - x6.lb = x0 + 2x1 : [6, 16] - x6.ub = x0 + 2x1 : [6, 16] - */ + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - List expectedBounds( { - Tightening( 2, 11, Tightening::LB ), - Tightening( 2, 27, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_abs() + { + NLR::NetworkLevelReasoner nlr; - Tightening( 4, 11, Tightening::LB ), - Tightening( 4, 27, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), + populateNetworkWithAbs( nlr ); - Tightening( 6, 6, Tightening::LB ), - Tightening( 6, 16, Tightening::UB ), - } ); + NLR::NetworkLevelReasoner nlr2; - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); - } + double input[2]; + double output1[2]; + double output2[2]; - void test_sbt_abs_positive_and_negative() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + // case 1 + input[0] = 0; + input[1] = 0; - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); + // case 2 + input[0] = 1; + input[1] = 1; - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_leaky_relu() + { + NLR::NetworkLevelReasoner nlr; - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + populateNetworkWithLeakyRelu( nlr ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + NLR::NetworkLevelReasoner nlr2; - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + 
double input[2]; + double output1[2]; + double output2[2]; - // Very loose bounds for neurons except inputs - double large = 1000000; + // case 1 + input[0] = 0; + input[1] = 0; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -30 ); + // case 2 + input[0] = 1; + input[1] = 1; - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - /* - Input ranges: + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_max() + { + NLR::NetworkLevelReasoner nlr; - x0: [4, 6] - x1: [1, 5] + populateNetworkWithMax( nlr ); - Layer 1: + NLR::NetworkLevelReasoner nlr2; - x2.lb = 2x0 + 3x1 - 30 : [-19, -3] - x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] + double input[2]; + double output1[2]; + double output2[2]; - First absolute value is negative, bounds get flipped - Second absolute value is positive, bounds surive the activation + // case 1 + input[0] = 0; + input[1] = 0; - x4.lb = -2x0 -3x1 + 30 : [3, 19] - x4.ub = -2x0 -3x1 + 30 : [3, 19] + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - Layer 2: + // case 2 + input[0] = 1; + input[1] = 1; - x6.lb = - 3x0 - 4x1 + 30 : [-8, 14] - x6.ub = - 3x0 - 4x1 + 30 : [-8, 14] - */ + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - List expectedBounds( { - Tightening( 2, -19, Tightening::LB ), - Tightening( 2, -3, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_softmax() + { + NLR::NetworkLevelReasoner nlr; - Tightening( 4, 3, Tightening::LB ), - Tightening( 4, 19, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), + populateNetworkWithSoftmax( nlr ); - Tightening( 6, -8, Tightening::LB ), - Tightening( 6, 14, Tightening::UB ), - } ); + NLR::NetworkLevelReasoner nlr2; - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() 
); + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); - } + double input[2]; + double output1[2]; + double output2[2]; - void test_sbt_absolute_values_positive_and_not_fixed() + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_bilinear() { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithBilinear( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_generate_input_query() + { NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); // Create the layers nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) + for ( unsigned i = 1; i <= 5; ++i ) nlr.addLayerDependency( i - 1, i ); - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 
2, 2 ), 7 ); - // Very loose bounds for neurons except inputs - double large = 1000000; + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -15 ); + // Set the weights and biases for the weighted sum layers - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First absolute value is undecided, bounds are concretized. - Second ReLU is active, bounds surive the activation - - x4 range: [0, 12] - x4.lb = 0 - x4.ub = 12 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - x0 - x1 : [-11, -5] - x6.ub = - x0 - x1 + 12 : [ 1, 7] - - x6 range: [-11, 7] - */ - - List expectedBounds( { - Tightening( 2, -4, Tightening::LB ), - Tightening( 2, 12, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 12, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -11, Tightening::LB ), - Tightening( 6, 7, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); - } - - void test_sbt_absolute_values_active_and_externally_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - 
nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. - nlr.setBias( 1, 0, -15 ); - - // However, the weighted sum variable has been eliminated - nlr.eliminateVariable( 2, -3 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2 is eliminated, everything set to -3 - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - Second absolute value is positive, bounds surive the activation - - x4: all set to 3 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - x0 - x1 + 3 : [-8, -2] - x6.ub = - x0 - x1 + 3 : [-8, -2] - */ - - List expectedBounds( { - // x2 does not appear, because it has been eliminated - - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 3, Tightening::LB ), - Tightening( 4, 3, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -8, Tightening::LB ), - Tightening( 6, -2, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - printf( "Dumpign discovered bounds:\n" ); - for ( const auto &bound : bounds ) - bound.dump(); - - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); - } - - void test_generate_input_query() - { - NLR::NetworkLevelReasoner nlr; - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Variable indexing - - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 
0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - - // Set the weights and biases for the weighted sum layers - - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); nlr.setWeight( 2, 0, 3, 0, 1 ); nlr.setWeight( 2, 0, 3, 1, -1 ); @@ -3783,47 +3809,1162 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite simulations1.append( Vector( simulationSize, 1 ) ); simulations1.append( Vector( simulationSize, 1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + } + + // With ReLU/Bilinear, case 2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0 ) ); + } + } + + void test_interval_arithmetic_bound_propagation_relu_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetwork( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, 
Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_abs_constraints() + { + NLR::NetworkLevelReasoner nlr; + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + 
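+        // For reference, worked out by hand from the weights, biases and
+        // variable indexing set in this test, the weighted-sum layers compute:
+        //   x2 = x0 + 1,  x4 = 2x0 - 3x1,  x6 = x1            (layer 1)
+        //   x8 = x3 + x5 - x7,  x10 = -x3 + x5 - x7 + 2       (layer 3)
+        //   x12 = x9,  x13 = x9 + 3*x11                       (layer 5)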
+        nlr.setWeight( 2, 0, 3, 0, 1 );
+        nlr.setWeight( 2, 0, 3, 1, -1 );
+        nlr.setWeight( 2, 1, 3, 0, 1 );
+        nlr.setWeight( 2, 1, 3, 1, 1 );
+        nlr.setWeight( 2, 2, 3, 0, -1 );
+        nlr.setWeight( 2, 2, 3, 1, -1 );
+
+        nlr.setWeight( 4, 0, 5, 0, 1 );
+        nlr.setWeight( 4, 0, 5, 1, 1 );
+        nlr.setWeight( 4, 1, 5, 1, 3 );
+
+        nlr.setBias( 1, 0, 1 );
+        nlr.setBias( 3, 1, 2 );
+
+        // Mark the activation sources
+        nlr.addActivationSource( 1, 0, 2, 0 );
+        nlr.addActivationSource( 1, 1, 2, 1 );
+        nlr.addActivationSource( 1, 2, 2, 2 );
+
+        nlr.addActivationSource( 3, 0, 4, 0 );
+        nlr.addActivationSource( 3, 1, 4, 1 );
+
+        // Layer dependencies
+        nlr.addLayerDependency( 0, 1 );
+        nlr.addLayerDependency( 1, 2 );
+        nlr.addLayerDependency( 2, 3 );
+        nlr.addLayerDependency( 3, 4 );
+        nlr.addLayerDependency( 4, 5 );
+
+        // Variable indexing
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 );
+
+        MockTableau tableau;
+        tableau.getBoundManager().initialize( 14 );
+
+        // Initialize the bounds
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 2 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 2 );
+
+        double large = 1000;
+        tableau.setLowerBound( 2, -large );
+        tableau.setUpperBound( 2, large );
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+        tableau.setLowerBound( 7, -large );
+        tableau.setUpperBound( 7, large );
+        tableau.setLowerBound( 8, -large );
+        tableau.setUpperBound( 8, large );
+        tableau.setLowerBound( 9, -large );
+        tableau.setUpperBound( 9, large );
+        tableau.setLowerBound( 10, -large );
+        tableau.setUpperBound( 10, large );
+        tableau.setLowerBound( 11, -large );
+        tableau.setUpperBound( 11, large );
+        tableau.setLowerBound( 12, -large );
+        tableau.setUpperBound( 12, large );
+        tableau.setLowerBound( 13, -large );
+        tableau.setUpperBound( 13, large );
+
+        nlr.setTableau( &tableau );
+
+        // Initialize
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+
+        // Perform the tightening pass
+        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, 0, Tightening::LB ),  Tightening( 2, 3, Tightening::UB ),
+            Tightening( 3, 0, Tightening::LB ),  Tightening( 3, 3, Tightening::UB ),
+
+            Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ),  Tightening( 5, 8, Tightening::UB ),
+
+            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
+            Tightening( 7, 0, Tightening::LB ),  Tightening( 7, 2, Tightening::UB ),
+
+            Tightening( 8, -2, Tightening::LB ), Tightening( 8, 11, Tightening::UB ),
+ Tightening( 9, 0, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), + + Tightening( 10, -3, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 41, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_sign_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithSign( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + 
tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 1, Tightening::LB ), Tightening( 3, 1, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 3, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + + Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, 3 ); + tableau.setUpperBound( 0, 4 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, 4, Tightening::LB ), Tightening( 2, 5, Tightening::UB ), + Tightening( 3, 1, Tightening::LB ), Tightening( 3, 1, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 11, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, 
-1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, 1, Tightening::LB ), Tightening( 10, 3, Tightening::UB ), + Tightening( 11, 1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + + Tightening( 12, 1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, 4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_leaky_relu_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithLeakyRelu( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), + Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), + Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ), + Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), + + Tightening( 9, -0.28, Tightening::LB ), Tightening( 9, 10.1, Tightening::UB ), + Tightening( 11, -0.38, Tightening::LB ), Tightening( 11, 9.1, Tightening::UB ), + + Tightening( 12, -0.28, Tightening::LB ), Tightening( 12, 10.1, Tightening::UB ), + Tightening( 13, -1.42, Tightening::LB ), Tightening( 13, 37.4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); 
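+        // Hand check of the interval pass under the new input bounds, assuming
+        // the 0.1 leaky-ReLU slope used by populateNetworkWithLeakyRelu (the
+        // slope that matches the expected bounds below):
+        //   x2 = x0 + 1 in [-2, 2],     so x3 in [-0.2, 2]
+        //   x4 = 2x0 - 3x1 in [-12, 5], so x5 in [-1.2, 5]
+        //   x6 = x1 in [-1, 2],         so x7 in [-0.1, 2]
+        //   x8 = x3 + x5 - x7 in [-3.4, 7.1], x10 = -x3 + x5 - x7 + 2 in [-3.2, 7.3]
+        //   x12 = x9 in [-0.34, 7.1],   x13 = x9 + 3*x11 in [-1.3, 29]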
+ + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, -0.2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 7.1, Tightening::UB ), + Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), + + Tightening( 9, -0.34, Tightening::LB ), Tightening( 9, 7.1, Tightening::UB ), + Tightening( 11, -0.32, Tightening::LB ), Tightening( 11, 7.3, Tightening::UB ), + + Tightening( 12, -0.34, Tightening::LB ), Tightening( 12, 7.1, Tightening::UB ), + Tightening( 13, -1.3, Tightening::LB ), Tightening( 13, 29, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_round_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithRound( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, 1.4 ); + tableau.setUpperBound( 0, 1.6 ); + tableau.setLowerBound( 1, -1.4 ); + tableau.setUpperBound( 1, 2.1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( 
&tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), + Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), + Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + + Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), + Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), + + Tightening( 9, -4, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), + Tightening( 11, -7, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), + Tightening( 13, -25, Tightening::LB ), Tightening( 13, 35, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3.1 ); + tableau.setUpperBound( 0, 1.6 ); + tableau.setLowerBound( 1, 1.4 ); + tableau.setUpperBound( 1, 2.1 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), + Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), + Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), + Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -16, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + + Tightening( 9, -16, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 11, -15, Tightening::LB ), Tightening( 11, 2, Tightening::UB ), + + Tightening( 12, -16, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -61, Tightening::LB ), Tightening( 13, 7, Tightening::UB ), + } ); + + 
TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_sigmoid_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithSigmoids( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 0.5000, Tightening::LB ), Tightening( 3, 0.8808, Tightening::UB ), + Tightening( 5, 0.0067, Tightening::LB ), Tightening( 5, 0.9933, Tightening::UB ), + Tightening( 7, 0.2689, Tightening::LB ), Tightening( 7, 0.7311, Tightening::UB ), + + Tightening( 8, -0.2244, Tightening::LB ), Tightening( 8, 1.6052, Tightening::UB ), + Tightening( 10, 0.3948, Tightening::LB ), Tightening( 10, 2.2244, Tightening::UB ), + + Tightening( 9, 0.4441, Tightening::LB ), Tightening( 9, 0.8327, Tightening::UB ), + Tightening( 11, 0.5974, Tightening::LB ), Tightening( 11, 0.9024, Tightening::UB ), + + Tightening( 12, 0.4441, Tightening::LB ), Tightening( 12, 0.8327, Tightening::UB ), + Tightening( 13, 2.2364, Tightening::LB ), Tightening( 13, 3.5399, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, 
-large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.1192, Tightening::LB ), Tightening( 3, 0.8808, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9933, Tightening::UB ), + Tightening( 7, 0.2689, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), + + Tightening( 8, -0.7616, Tightening::LB ), Tightening( 8, 1.6052, Tightening::UB ), + Tightening( 10, 0.2384, Tightening::LB ), Tightening( 10, 2.6052, Tightening::UB ), + + Tightening( 9, 0.3183, Tightening::LB ), Tightening( 9, 0.8327, Tightening::UB ), + Tightening( 11, 0.5593, Tightening::LB ), Tightening( 11, 0.9312, Tightening::UB ), + + Tightening( 12, 0.3183, Tightening::LB ), Tightening( 12, 0.8327, Tightening::UB ), + Tightening( 13, 1.9963, Tightening::LB ), Tightening( 13, 3.6263, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_max_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithMax( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 12 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + + 
Tightening( 8, -1, Tightening::LB ), Tightening( 8, 10, Tightening::UB ), + Tightening( 9, -8, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), + + Tightening( 11, -10, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -8, Tightening::LB ), Tightening( 3, 9, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 9, Tightening::UB ), + Tightening( 7, -5, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 16, Tightening::UB ), + Tightening( 9, -14, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -5, Tightening::LB ), Tightening( 10, 16, Tightening::UB ), + + Tightening( 11, -16, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_softmax_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithSoftmax( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + 
tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 0.0066, Tightening::LB ), Tightening( 3, 0.9517, Tightening::UB ), + Tightening( 5, 0.0007, Tightening::LB ), Tightening( 5, 0.9909, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ), + + Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ), + Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ), + + Tightening( 9, 0.0240, Tightening::LB ), Tightening( 9, 0.8349, Tightening::UB ), + Tightening( 11, 0.1651, Tightening::LB ), Tightening( 11, 0.9759, Tightening::UB ), + + Tightening( 12, 0.0240, Tightening::LB ), Tightening( 12, 0.8349, Tightening::UB ), + Tightening( 13, 0.5192, Tightening::LB ), Tightening( 13, 3.7629, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - } + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, 
Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), - // With ReLU/Bilinear, case 2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 2 ) ); + Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 10, 0.0654, Tightening::LB ), Tightening( 10, 2.9933, Tightening::UB ), + + Tightening( 9, 0.0184, Tightening::LB ), Tightening( 9, 0.8678, Tightening::UB ), + Tightening( 11, 0.1322, Tightening::LB ), Tightening( 11, 0.9816, Tightening::UB ), - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + Tightening( 12, 0.0184, Tightening::LB ), Tightening( 12, 0.8678, Tightening::UB ), + Tightening( 13, 0.4151, Tightening::LB ), Tightening( 13, 3.8125, Tightening::UB ), + } ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0 ) ); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - void test_interval_arithmetic_bound_propagation_relu_constraints() + void test_interval_arithmetic_bound_propagation_bilinear_constraints() { NLR::NetworkLevelReasoner nlr; - populateNetwork( nlr ); - + populateNetworkWithBilinear( nlr ); + + MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); + tableau.getBoundManager().initialize( 12 ); // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 0, -0.1 ); + tableau.setUpperBound( 0, 0.1 ); + tableau.setLowerBound( 1, -0.1 ); + tableau.setUpperBound( 1, 0.1 ); double large = 1000; tableau.setLowerBound( 2, -large ); @@ -3846,10 +4987,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 10, large ); tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); nlr.setTableau( &tableau ); @@ -3860,34 +4997,32 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + Tightening( 2, 0.9, Tightening::LB ), Tightening( 2, 1.1, Tightening::UB ), + Tightening( 3, -0.5, Tightening::LB ), Tightening( 3, 0.5, Tightening::UB ), + Tightening( 4, -0.3, Tightening::LB ), Tightening( 4, 0.3, Tightening::UB ), + Tightening( 5, -0.3, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ), + + Tightening( 6, -0.55, Tightening::LB ), Tightening( 6, 0.55, 
Tightening::UB ), + Tightening( 7, -0.09, Tightening::LB ), Tightening( 7, 0.09, Tightening::UB ), - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + Tightening( 8, 1.36, Tightening::LB ), Tightening( 8, 2.64, Tightening::UB ), + Tightening( 9, -0.64, Tightening::LB ), Tightening( 9, 0.64, Tightening::UB ), + + Tightening( 10, -1.6896, Tightening::LB ), Tightening( 10, 1.6896, Tightening::UB ), + + Tightening( 11, -1.6896, Tightening::LB ), Tightening( 11, 1.6896, Tightening::UB ), } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 0, -0.3 ); + tableau.setUpperBound( 0, 0.1 ); + tableau.setLowerBound( 1, -0.1 ); + tableau.setUpperBound( 1, 0.2 ); tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); @@ -3909,10 +5044,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 10, large ); tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); // Initialize TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -3921,109 +5052,39 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + Tightening( 2, 0.7, Tightening::LB ), Tightening( 2, 1.1, Tightening::UB ), + Tightening( 3, -0.8, Tightening::LB ), Tightening( 3, 0.9, Tightening::UB ), + Tightening( 4, -0.5, Tightening::LB ), Tightening( 4, 0.5, Tightening::UB ), + Tightening( 5, -0.6, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ), + + Tightening( 6, -0.88, Tightening::LB ), Tightening( 6, 0.99, Tightening::UB ), + Tightening( 7, -0.3, Tightening::LB ), Tightening( 7, 0.3, Tightening::UB ), - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + Tightening( 8, 0.82, Tightening::LB ), Tightening( 8, 3.29, Tightening::UB ), + Tightening( 9, -1.29, Tightening::LB ), Tightening( 9, 1.18, Tightening::UB ), + + Tightening( 10, -4.2441, Tightening::LB ), Tightening( 10, 3.8822, Tightening::UB ), + + Tightening( 11, -3.8822, Tightening::LB ), Tightening( 11, 4.2441, Tightening::UB ), } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, 
expectedBounds2 ) ); } - - void test_interval_arithmetic_bound_propagation_abs_constraints() + + void test_interval_arithmetic_bound_propagation_abs_and_relu_constraints() { NLR::NetworkLevelReasoner nlr; - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Layer dependenices - nlr.addLayerDependency( 0, 1 ); - nlr.addLayerDependency( 1, 2 ); - nlr.addLayerDependency( 2, 3 ); - nlr.addLayerDependency( 3, 4 ); - nlr.addLayerDependency( 4, 5 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + populateNetworkWithAbsAndRelu( nlr ); MockTableau tableau; tableau.getBoundManager().initialize( 14 ); // Initialize the bounds tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 2 ); + tableau.setUpperBound( 0, 1 ); tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + tableau.setUpperBound( 1, 1 ); double large = 1000; tableau.setLowerBound( 2, -large ); @@ -4060,23 +5121,23 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 8, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 
5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - Tightening( 10, -3, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + Tightening( 10, -5, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 41, Tightening::UB ), + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), } ); List bounds; @@ -4121,42 +5182,42 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), - } ); + Tightening( 10, -10, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - void test_interval_arithmetic_bound_propagation_sign_constraints() + void 
test_interval_arithmetic_bound_propagation_round_and_sign_constraints() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithSign( nlr ); - + populateNetworkWithRoundAndSign( nlr ); + MockTableau tableau; tableau.getBoundManager().initialize( 14 ); // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 0, 1.4 ); + tableau.setUpperBound( 0, 1.6 ); + tableau.setLowerBound( 1, -1.4 ); + tableau.setUpperBound( 1, 2.1 ); double large = 1000; tableau.setLowerBound( 2, -large ); @@ -4193,34 +5254,34 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 1, Tightening::LB ), Tightening( 3, 1, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), + Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), + Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + + Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 3, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), + Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), + + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), - Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); // Change the current bounds - tableau.setLowerBound( 0, 3 ); - tableau.setUpperBound( 0, 4 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 0, -3.1 ); + tableau.setUpperBound( 0, 1.6 ); + tableau.setLowerBound( 1, 1.4 ); + tableau.setUpperBound( 1, 2.1 ); tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); @@ -4254,33 +5315,32 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); List expectedBounds2( { - Tightening( 2, 4, Tightening::LB ), Tightening( 2, 5, Tightening::UB ), - Tightening( 3, 1, Tightening::LB ), Tightening( 3, 1, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 11, Tightening::UB ), - 
Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 2, -2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), + Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), + Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), + Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 10, 1, Tightening::LB ), Tightening( 10, 3, Tightening::UB ), - Tightening( 11, 1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + Tightening( 8, -16, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), - Tightening( 12, 1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, 4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - void test_interval_arithmetic_bound_propagation_leaky_relu_constraints() + void test_interval_arithmetic_bound_propagation_leaky_relu_and_sigmoid_constraints() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithLeakyRelu( nlr ); + populateNetworkWithLeakyReluAndSigmoid( nlr ); MockTableau tableau; tableau.getBoundManager().initialize( 14 ); @@ -4326,26 +5386,27 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), - Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), + Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), - Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), + Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ), - Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), + Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ), + Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), - Tightening( 9, -0.28, Tightening::LB ), Tightening( 9, 10.1, 
Tightening::UB ), - Tightening( 11, -0.38, Tightening::LB ), Tightening( 11, 9.1, Tightening::UB ), + Tightening( 9, 0.0573, Tightening::LB ), Tightening( 9, 0.9999, Tightening::UB ), + Tightening( 11, 0.0219, Tightening::LB ), Tightening( 11, 0.9999, Tightening::UB ), - Tightening( 12, -0.28, Tightening::LB ), Tightening( 12, 10.1, Tightening::UB ), - Tightening( 13, -1.42, Tightening::LB ), Tightening( 13, 37.4, Tightening::UB ), + Tightening( 12, 0.0573, Tightening::LB ), Tightening( 12, 0.9999, Tightening::UB ), + Tightening( 13, 0.1229, Tightening::LB ), Tightening( 13, 3.9996, Tightening::UB ), } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); // Change the current bounds @@ -4386,42 +5447,42 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 3, -0.2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 3, -0.2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 7.1, Tightening::UB ), - Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), + Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 7.1, Tightening::UB ), + Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), - Tightening( 9, -0.34, Tightening::LB ), Tightening( 9, 7.1, Tightening::UB ), - Tightening( 11, -0.32, Tightening::LB ), Tightening( 11, 7.3, Tightening::UB ), + Tightening( 9, 0.0323, Tightening::LB ), Tightening( 9, 0.9992, Tightening::UB ), + Tightening( 11, 0.0392, Tightening::LB ), Tightening( 11, 0.9993, Tightening::UB ), - Tightening( 12, -0.34, Tightening::LB ), Tightening( 12, 7.1, Tightening::UB ), - Tightening( 13, -1.3, Tightening::LB ), Tightening( 13, 29, Tightening::UB ), + Tightening( 12, 0.0323, Tightening::LB ), Tightening( 12, 0.9992, Tightening::UB ), + Tightening( 13, 0.1498, Tightening::LB ), Tightening( 13, 3.9972, Tightening::UB ), } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - void test_interval_arithmetic_bound_propagation_round_constraints() + void test_interval_arithmetic_bound_propagation_softmax_and_max_constraints() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithRound( nlr ); + populateNetworkWithSoftmaxAndMax( nlr ); MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); + tableau.getBoundManager().initialize( 12 ); // Initialize the bounds - tableau.setLowerBound( 0, 1.4 ); - tableau.setUpperBound( 0, 1.6 ); - tableau.setLowerBound( 1, -1.4 ); - tableau.setUpperBound( 1, 2.1 ); + tableau.setLowerBound( 0, 
-1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); double large = 1000; tableau.setLowerBound( 2, -large ); @@ -4444,10 +5505,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 10, large ); tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); nlr.setTableau( &tableau ); @@ -4458,34 +5515,97 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); List expectedBounds( { - Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), - Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), - Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 3, 0.0066, Tightening::LB ), Tightening( 3, 0.9517, Tightening::UB ), + Tightening( 5, 0.0007, Tightening::LB ), Tightening( 5, 0.9909, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ), - Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), - Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), + Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ), + Tightening( 9, 0.3192, Tightening::LB ), Tightening( 9, 2.9819, Tightening::UB ), - Tightening( 9, -4, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), - Tightening( 11, -7, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - - Tightening( 12, -4, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), - Tightening( 13, -25, Tightening::LB ), Tightening( 13, 35, Tightening::UB ), + Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ), + + Tightening( 11, -2.9819, Tightening::LB ), Tightening( 11, -0.3192, Tightening::UB ) } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); // Change the current bounds - tableau.setLowerBound( 0, -3.1 ); - tableau.setUpperBound( 0, 1.6 ); - tableau.setLowerBound( 1, 1.4 ); - tableau.setUpperBound( 1, 2.1 ); + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + 
tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + + Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 9, 0.0654, Tightening::LB ), Tightening( 9, 2.9933, Tightening::UB ), + + Tightening( 10, 0.0654, Tightening::LB ), Tightening( 10, 2.9933, Tightening::UB ), + + Tightening( 11, -2.9933, Tightening::LB ), Tightening( 11, -0.0654, Tightening::UB ) + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_relu_and_bilinear_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithReluAndBilinear( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 12 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + double large = 1000; tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); @@ -4506,10 +5626,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 10, large ); tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); // Initialize TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -4517,45 +5635,34 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Perform the tightening pass TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - List expectedBounds2( { - Tightening( 2, -2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), - Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), - Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), - Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 8, -16, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, 
Tightening::UB ), - - Tightening( 9, -16, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 11, -15, Tightening::LB ), Tightening( 11, 2, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - Tightening( 12, -16, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, -61, Tightening::LB ), Tightening( 13, 7, Tightening::UB ), + Tightening( 10, -7, Tightening::LB ), Tightening( 10, 49, Tightening::UB ), + + Tightening( 11, -49, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_sigmoid_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithSigmoids( nlr ); - - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); + tableau.setUpperBound( 1, 2 ); - double large = 1000; tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); @@ -4576,12 +5683,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 10, large ); tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - nlr.setTableau( &tableau ); // Initialize TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -4589,287 +5690,608 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Perform the tightening pass TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, -2, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -14, Tightening::LB ), Tightening( 10, 49, Tightening::UB ), + + Tightening( 11, -49, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_sbt_relus_all_active() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + 
Input ranges:
+
+          x0: [4, 6]
+          x1: [1, 5]
+
+          Layer 1:
+
+          x2.lb = 2x0 + 3x1 : [11, 27]
+          x2.ub = 2x0 + 3x1 : [11, 27]
+
+          x3.lb = x0 + x1 : [5, 11]
+          x3.ub = x0 + x1 : [5, 11]
+
+          Both ReLUs active, bounds survive through activations:
+
+          x4.lb = 2x0 + 3x1 : [11, 27]
+          x4.ub = 2x0 + 3x1 : [11, 27]
+
+          x5.lb = x0 + x1 : [5, 11]
+          x5.ub = x0 + x1 : [5, 11]
+
+          Layer 2:
+
+          x6.lb = x0 + 2x1 : [6, 16]
+          x6.ub = x0 + 2x1 : [6, 16]
+        */
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, 11, Tightening::LB ),
+            Tightening( 2, 27, Tightening::UB ),
+            Tightening( 3, 5, Tightening::LB ),
+            Tightening( 3, 11, Tightening::UB ),
+
+            Tightening( 4, 11, Tightening::LB ),
+            Tightening( 4, 27, Tightening::UB ),
+            Tightening( 5, 5, Tightening::LB ),
+            Tightening( 5, 11, Tightening::UB ),
+
+            Tightening( 6, 6, Tightening::LB ),
+            Tightening( 6, 16, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+    }
+
+    void test_sbt_relus_active_and_inactive()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTRelu( nlr, tableau );
+
+        tableau.setLowerBound( 0, 4 );
+        tableau.setUpperBound( 0, 6 );
+        tableau.setLowerBound( 1, 1 );
+        tableau.setUpperBound( 1, 5 );
+
+        // Strong negative bias for x2, which is node (1,0)
+        nlr.setBias( 1, 0, -30 );
+
+        // Invoke SBT
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
+
+        /*
+          Input ranges:
+
+          x0: [4, 6]
+          x1: [1, 5]
+
+          Layer 1:
+
+          x2.lb = 2x0 + 3x1 - 30 : [-19, -3]
+          x2.ub = 2x0 + 3x1 - 30 : [-19, -3]
+
+          x3.lb = x0 + x1 : [5, 11]
+          x3.ub = x0 + x1 : [5, 11]
+
+          First ReLU is inactive, bounds get zeroed
+          Second ReLU is active, bounds survive the activation
+
+          x4.lb = 0
+          x4.ub = 0
+
+          x5.lb = x0 + x1 : [5, 11]
+          x5.ub = x0 + x1 : [5, 11]
+
+          Layer 2:
+
+          x6.lb = - x0 - x1 : [-11, -5]
+          x6.ub = - x0 - x1 : [-11, -5]
+        */
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, -19, Tightening::LB ),
+            Tightening( 2, -3, Tightening::UB ),
+            Tightening( 3, 5, Tightening::LB ),
+            Tightening( 3, 11, Tightening::UB ),
+
+            Tightening( 4, 0, Tightening::LB ),
+            Tightening( 4, 0, Tightening::UB ),
+            Tightening( 5, 5, Tightening::LB ),
+            Tightening( 5, 11, Tightening::UB ),
+
+            Tightening( 6, -11, Tightening::LB ),
+            Tightening( 6, -5, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+    }
+
+    void test_sbt_relus_active_and_not_fixed()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTRelu( nlr, tableau );
+
+        tableau.setLowerBound( 0, 4 );
+        tableau.setUpperBound( 0, 6 );
+        tableau.setLowerBound( 1, 1 );
+        tableau.setUpperBound( 1, 5 );
+
+        // Strong negative bias for x2, which is node (1,0)
+        nlr.setBias( 1, 0, -15 );
+
+        // Invoke SBT
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
+
+        /*
+          Input ranges:
+
+          x0: [4, 6]
+          x1: [1, 5]
+
+          Layer 1:
+
+          x2.lb = 2x0 + 3x1 - 15 : [-4, 12]
+          x2.ub = 2x0 + 3x1 - 15 : [-4, 12]
+
+          x3.lb = x0 + x1 : [5, 11]
+          x3.ub = x0 + x1 : [5, 11]
+
+          First ReLU is undecided, bound is concretized.
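+          In general, an undecided ReLU with source bounds [lb, ub] is relaxed
+          from above by the chord through ( lb, 0 ) and ( ub, ub ), i.e. slope
+          ub / ( ub - lb ) with offset -ub * lb / ( ub - lb ); the coefficient
+          and the +3 offset below are exactly this relaxation.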
+          Coefficient: 12/(12--4) = 12/16 = 0.75
+          Second ReLU is active, bounds survive the activation
+
+          x4 range: [0, 12]
+          x4.lb = 0.75( 2x0 + 3x1 ) - 0.75 * 15 = 1.5x0 + 2.25x1 - 11.25
+          x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25
+
+          x5.lb = x0 + x1 : [5, 11]
+          x5.ub = x0 + x1 : [5, 11]
+
+          Layer 2:
+
+          x6.lb = 0.5x0 + 1.25x1 - 11.25
+          x6.ub = 0.5x0 + 1.25x1 - 8.25
+
+          x6 range: [2 + 1.25 - 11.25 = -8, 3 + 6.25 - 8.25 = 1] = [-8, 1]
+        */
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, -4, Tightening::LB ),
+            Tightening( 2, 12, Tightening::UB ),
+            Tightening( 3, 5, Tightening::LB ),
+            Tightening( 3, 11, Tightening::UB ),
+
+            Tightening( 4, 0, Tightening::LB ),
+            Tightening( 4, 12, Tightening::UB ),
+            Tightening( 5, 5, Tightening::LB ),
+            Tightening( 5, 11, Tightening::UB ),
+
+            Tightening( 6, -8, Tightening::LB ),
+            Tightening( 6, 1, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+    }
+
+    void test_sbt_relus_active_and_externally_fixed()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTRelu( nlr, tableau );
+
+        tableau.setLowerBound( 0, 4 );
+        tableau.setUpperBound( 0, 6 );
+        tableau.setLowerBound( 1, 1 );
+        tableau.setUpperBound( 1, 5 );
+
+        // Strong negative bias for x2, which is node (1,0).
Should make the node unfixed.
+        nlr.setBias( 1, 0, -15 );
+
+        // However, one of the ReLU's variables has been eliminated
+        nlr.eliminateVariable( 2, -3 );
+
+        // Invoke SBT
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
+
+        /*
+          Input ranges:
+
+          x0: [4, 6]
+          x1: [1, 5]
+
+          Layer 1:
+
+          x2.lb = 2x0 + 3x1 - 15 : [-4, 12]
+          x2.ub = 2x0 + 3x1 - 15 : [-4, 12]
+
+          x3.lb = x0 + x1 : [5, 11]
+          x3.ub = x0 + x1 : [5, 11]
+
+          First ReLU is inactive (set externally), bounds get zeroed
+          Second ReLU is active, bounds survive the activation
+
+          x4.lb = 0
+          x4.ub = 0
+
+          x5.lb = x0 + x1 : [5, 11]
+          x5.ub = x0 + x1 : [5, 11]
+
+          Layer 2:
+
+          x6.lb = - x0 - x1 : [-11, -5]
+          x6.ub = - x0 - x1 : [-11, -5]
+        */
+
+        List<Tightening> expectedBounds( {
+            // x2 does not appear, because it has been eliminated
+
+            Tightening( 3, 5, Tightening::LB ),
+            Tightening( 3, 11, Tightening::UB ),
+
+            Tightening( 4, 0, Tightening::LB ),
+            Tightening( 4, 0, Tightening::UB ),
+            Tightening( 5, 5, Tightening::LB ),
+            Tightening( 5, 11, Tightening::UB ),
+
+            Tightening( 6, -11, Tightening::LB ),
+            Tightening( 6, -5, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+    }
+
+    void test_sbt_relu_residual1()
+    {
-        NLR::NetworkLevelReasoner nlr;
-        populateNetworkWithMax( nlr );
-
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
         MockTableau tableau;
-        tableau.getBoundManager().initialize( 12 );
+        nlr.setTableau( &tableau );
+        populateNetworkSBTReluResidual1( nlr, tableau );
-
-        // Initialize the bounds
         tableau.setLowerBound( 0, -1 );
         tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-
-        double large = 1000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-
tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - nlr.setTableau( &tableau ); + /* + Input ranges: - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + x0: [-1, 1] - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + Layer 1: - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + x1.lb = x0 : [-1, 1] + x1.ub = x0 : [-1, 1] - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 10, Tightening::UB ), - Tightening( 9, -8, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + ReLU is undecided, bound is concretized. + Coefficient: 1/( 1--1 ) = 1/2 = 0.5 - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), + x2.lb = 0.5x0 + x2.ub = 0.5x0 + 0.5 + x2 range: [0, 1] - Tightening( 11, -10, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + Layer 2 (with residual from x0): + + x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1] + x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5] + x3 range: [-1, 2.5] + + ReLU is undecided, bound is concretized. + Coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7. 
+ + x4.lb = 0 + x4.ub = 5/7 ( -1.5x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5] + x4 range: [0, 2.5] + + Layer 3 (with residual from x1): + + x5.lb = 3 ( 0 ) + 3 ( x0 ) + 1 = 3x0 + 1 : [-2, 4] + x5.ub = 3 ( -15/14 x0 + 20/14 ) + 3 ( x0 ) + 1 = -3/14 x0 + 74/14 : [71/14, 77/14 = 5.5] + + x5 range: [-2, 4] + */ + + List expectedBounds( { + Tightening( 1, -1, Tightening::LB ), + Tightening( 1, 1, Tightening::UB ), + Tightening( 2, 0, Tightening::LB ), + Tightening( 2, 1, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 2.5, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 2.5, Tightening::UB ), + Tightening( 5, 2, Tightening::LB ), + Tightening( 5, 5.5, Tightening::UB ), } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_relu_residual2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTReluResidual2( nlr, tableau ); - // Change the current bounds - tableau.setLowerBound( 0, -3 ); + tableau.setLowerBound( 0, -1 ); tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - // Initialize + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -8, Tightening::LB ), Tightening( 3, 9, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, -2, Tightening::LB ), Tightening( 6, 9, Tightening::UB ), - Tightening( 7, -5, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + x0: [-1, 1] - Tightening( 8, -5, Tightening::LB ), Tightening( 8, 16, Tightening::UB ), - Tightening( 9, -14, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + Layer 1: + + x1.lb = x0 : [-1, 1] + x1.ub = x0 : [-1, 1] + + ReLU is undecided, bound is concretized. 
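+          (Same chord relaxation as in the earlier ReLU tests: slope
+          ub / ( ub - lb ), upper offset -ub * lb / ( ub - lb ), which for
+          the bounds [-1, 1] here evaluates to 0.5.)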
+ Coefficient: 1/( 1--1 ) = 1/2 = 0.5 - Tightening( 10, -5, Tightening::LB ), Tightening( 10, 16, Tightening::UB ), + x2.lb = 0.5x0 + x2.ub = 0.5x0 + 0.5 + x2 range: [0, 1] - Tightening( 11, -16, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + Layer 2 (with residual from x0): + + x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1] + x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5] + x3 range: [-1, 2.5] + + ReLU is undecided, bound is concretized. + Coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7. + + x4.lb = 0 + x4.ub = 5/7 ( -1.5x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5] + x4 range: [0, 2.5] + + Layer 3 (with residual from x0): + + x5.lb = 3 ( 0 ) + 1 ( x0 ) + 1 = 1x0 + 1 : [0, 2] + x5.ub = 3 ( -15/14 x0 + 20/14 ) + 1 ( x0 ) + 1 = -31/14 x0 + 74/14 : [43/14, 105/14 = 7.5] + x5 range: [0, 7.5] + + Layer 4: + x6.lb = 1x0 + 1 : [0, 2] + x6.ub = -31/14 x0 + 74/14 : [43/14, 105/14 = 7.5] + x6 range: [0, 7.5] + */ + + List expectedBounds( { + Tightening( 1, -1, Tightening::LB ), + Tightening( 1, 1, Tightening::UB ), + Tightening( 2, 0, Tightening::LB ), + Tightening( 2, 1, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 2.5, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 2.5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 7.5, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), + Tightening( 6, 7.5, Tightening::UB ), } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - void test_interval_arithmetic_bound_propagation_softmax_constraints() + void test_sbt_relu_reindex() { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithSoftmax( nlr ); - + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); + nlr.setTableau( &tableau ); + populateNetworkSBTReluReindex( nlr, tableau ); - // Initialize the bounds tableau.setLowerBound( 0, -1 ); tableau.setUpperBound( 0, 1 ); tableau.setLowerBound( 1, -1 ); tableau.setUpperBound( 1, 1 ); - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - nlr.setTableau( &tableau ); + /* + Input ranges: - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + x0: [-1, 1] + x1: [-1, 1] - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( 
nlr.intervalArithmeticBoundPropagation() ); + Layer 1: - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + x2.lb = x0 + x1 : [-2, 2] + x2.ub = x0 + x1 : [-2, 2] + + x3.lb = x0 - x1 : [-2, 2] + x3.ub = x0 - x1 : [-2, 2] + + Both ReLUs are undecided, bounds are concretized. + Coefficient: 2/( 2--2 ) = 2/4 = 0.5 - Tightening( 3, 0.0066, Tightening::LB ), Tightening( 3, 0.9517, Tightening::UB ), - Tightening( 5, 0.0007, Tightening::LB ), Tightening( 5, 0.9909, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ), + x4.lb = 0.5 ( x0 + x1 ) = 0.5x0 + 0.5x1 + x4.ub = 0.5 ( x0 + x1 ) + 1 = 0.5x0 + 0.5x1 + 1 + x4 range: [0, 2] + + x5.lb = 0.5 ( x0 - x1 ) = 0.5x0 - 0.5x1 + x5.ub = 0.5 ( x0 - x1 ) + 1 = 0.5x0 - 0.5x1 + 1 + x5 range: [0, 2] + + Layer 2: - Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ), - Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ), + x6.lb = 1 ( 0.5x0 + 0.5x1 ) + 1 ( 0.5x0 - 0.5x1 ) = x0 : [-1, 1] + x6.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) + 1 ( 0.5x0 - 0.5x1 + 1 ) = x0 + 2 : [1, 3] + x6 range: [-1, 3] + + x7.lb = 1 ( 0.5x0 + 0.5x1 ) - 1 ( 0.5x0 - 0.5x1 + 1 ) = x1 - 1 : [-2, 0] + x7.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) - 1 ( 0.5x0 - 0.5x1 ) = x1 + 1 : [0, 2] + x7 range: [-2, 2] + + Both ReLUs are undecided, bounds are concretized. + Coefficient (first ReLU, lower): 1/( 1--1 ) = 1/2 = 0.5 + Coefficient (first ReLU, upper): 1 (propagated as is) + Coefficient (second ReLU, lower): 0 (bound is zero) + Coefficient (second ReLU, upper): 2/( 2--2 ) = 2/4 = 0.5 - Tightening( 9, 0.0240, Tightening::LB ), Tightening( 9, 0.8349, Tightening::UB ), - Tightening( 11, 0.1651, Tightening::LB ), Tightening( 11, 0.9759, Tightening::UB ), + x8.lb = 0.5 ( x0 ) = 0.5x0 + x8.ub = x0 + 2 + x8 range: [0, 3] + + x9.lb = 0 + x9.ub = 0.5 ( x1 + 1 ) + 1 = 0.5x1 + 1.5 + x9 range: [0, 2] + + Layer 3: + + x10.lb = 1 ( 0.5x0 ) + 1 ( 0 ) + 1 = 0.5x0 + 1 : [0.5, 1.5] + x10.ub = 1 ( x0 + 2 ) + 1 ( 0.5x1 + 1.5 ) + 1 = x0 + 0.5x1 + 4.5 : [3, 6] + x10 range: [0.5, 6] + + x11.lb = 0.5x1 - 0.5 + x11.ub = 0.5x1 + 1.5 + x11 range: [0, 2] + + */ - Tightening( 12, 0.0240, Tightening::LB ), Tightening( 12, 0.8349, Tightening::UB ), - Tightening( 13, 0.5192, Tightening::LB ), Tightening( 13, 3.7629, Tightening::UB ), - } ); + List expectedBounds( + { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, 0.5, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 2, Tightening::UB ) + + } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_abs_all_positive() + { + Options::get()->setString( 
Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); @@ -4881,66 +6303,110 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - // Initialize + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + x0: [4, 6] + x1: [1, 5] - Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), - Tightening( 10, 0.0654, Tightening::LB ), Tightening( 10, 2.9933, Tightening::UB ), - - Tightening( 9, 0.0184, Tightening::LB ), Tightening( 9, 0.8678, Tightening::UB ), - Tightening( 11, 0.1322, Tightening::LB ), Tightening( 11, 
0.9816, Tightening::UB ),
-            Tightening( 12, 0.0184, Tightening::LB ), Tightening( 12, 0.8678, Tightening::UB ),
-            Tightening( 13, 0.4151, Tightening::LB ), Tightening( 13, 3.8125, Tightening::UB ),
+          Layer 1:
+
+          x2.lb = 2x0 + 3x1 : [11, 27]
+          x2.ub = 2x0 + 3x1 : [11, 27]
+
+          x3.lb = x0 + x1 : [5, 11]
+          x3.ub = x0 + x1 : [5, 11]
+
+          Both absolute values positive, bounds survive through activations:
+
+          x4.lb = 2x0 + 3x1 : [11, 27]
+          x4.ub = 2x0 + 3x1 : [11, 27]
+
+          x5.lb = x0 + x1 : [5, 11]
+          x5.ub = x0 + x1 : [5, 11]
+
+          Layer 2:
+
+          x6.lb = x0 + 2x1 : [6, 16]
+          x6.ub = x0 + 2x1 : [6, 16]
+        */
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, 11, Tightening::LB ),
+            Tightening( 2, 27, Tightening::UB ),
+            Tightening( 3, 5, Tightening::LB ),
+            Tightening( 3, 11, Tightening::UB ),
+
+            Tightening( 4, 11, Tightening::LB ),
+            Tightening( 4, 27, Tightening::UB ),
+            Tightening( 5, 5, Tightening::LB ),
+            Tightening( 5, 11, Tightening::UB ),
+
+            Tightening( 6, 6, Tightening::LB ),
+            Tightening( 6, 16, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
     }

-    void test_interval_arithmetic_bound_propagation_bilinear_constraints()
+    void test_sbt_abs_positive_and_negative()
     {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
         NLR::NetworkLevelReasoner nlr;
-        populateNetworkWithBilinear( nlr );
-
         MockTableau tableau;
-        tableau.getBoundManager().initialize( 12 );
+        tableau.getBoundManager().initialize( 7 );
+        nlr.setTableau( &tableau );

-        // Initialize the bounds
-        tableau.setLowerBound( 0, -0.1 );
-        tableau.setUpperBound( 0, 0.1 );
-        tableau.setLowerBound( 1, -0.1 );
-        tableau.setUpperBound( 1, 0.1 );
+        // Create the layers
+        nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
+        nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 );
+        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );
+
+        // Mark layer dependencies
+        for ( unsigned i = 1; i <= 3; ++i )
+            nlr.addLayerDependency( i - 1, i );
+
+        // Weights
+        nlr.setWeight( 0, 0, 1, 0, 2 );
+        nlr.setWeight( 0, 0, 1, 1, 1 );
+        nlr.setWeight( 0, 1, 1, 0, 3 );
+        nlr.setWeight( 0, 1, 1, 1, 1 );
+        nlr.setWeight( 2, 0, 3, 0, 1 );
+        nlr.setWeight( 2, 1, 3, 0, -1 );
+
+        // Mark the Abs sources
+        nlr.addActivationSource( 1, 0, 2, 0 );
+        nlr.addActivationSource( 1, 1, 2, 1 );
+
+        // Variable indexing
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
+
+        // Very loose bounds for neurons except inputs
+        double large = 1000000;
-        double large = 1000;
         tableau.setLowerBound( 2, -large );
         tableau.setUpperBound( 2, large );
         tableau.setLowerBound( 3, -large );
@@ -4951,116 +6417,114 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         tableau.setUpperBound( 5, large );
         tableau.setLowerBound( 6, -large );
         tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-
tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-        nlr.setTableau( &tableau );
+        tableau.setLowerBound( 0, 4 );
+        tableau.setUpperBound( 0, 6 );
+        tableau.setLowerBound( 1, 1 );
+        tableau.setUpperBound( 1, 5 );

-        // Initialize
+        // Strong negative bias for x2, which is node (1,0)
+        nlr.setBias( 1, 0, -30 );
+
+        // Invoke SBT
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );

-        // Perform the tightening pass
-        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
+
+        /*
+          Input ranges:

-        List<Tightening> expectedBounds( {
-            Tightening( 2, 0.9, Tightening::LB ), Tightening( 2, 1.1, Tightening::UB ),
-            Tightening( 3, -0.5, Tightening::LB ), Tightening( 3, 0.5, Tightening::UB ),
-            Tightening( 4, -0.3, Tightening::LB ), Tightening( 4, 0.3, Tightening::UB ),
-            Tightening( 5, -0.3, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ),
-
-            Tightening( 6, -0.55, Tightening::LB ), Tightening( 6, 0.55, Tightening::UB ),
-            Tightening( 7, -0.09, Tightening::LB ), Tightening( 7, 0.09, Tightening::UB ),
+          x0: [4, 6]
-            Tightening( 8, 1.36, Tightening::LB ), Tightening( 8, 2.64, Tightening::UB ),
-            Tightening( 9, -0.64, Tightening::LB ), Tightening( 9, 0.64, Tightening::UB ),
-
-            Tightening( 10, -1.6896, Tightening::LB ), Tightening( 10, 1.6896, Tightening::UB ),
-
-            Tightening( 11, -1.6896, Tightening::LB ), Tightening( 11, 1.6896, Tightening::UB ),
-        } );
+          x1: [1, 5]

-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+          Layer 1:

-        // Change the current bounds
-        tableau.setLowerBound( 0, -0.3 );
-        tableau.setUpperBound( 0, 0.1 );
-        tableau.setLowerBound( 1, -0.1 );
-        tableau.setUpperBound( 1, 0.2 );
+
+          x2.lb = 2x0 + 3x1 - 30 : [-19, -3]
+          x2.ub = 2x0 + 3x1 - 30 : [-19, -3]

-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
+
+          x3.lb = x0 + x1 : [5, 11]
+          x3.ub = x0 + x1 : [5, 11]

-        // Initialize
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+
+          First absolute value is negative, bounds get flipped
+          Second absolute value is positive, bounds survive the activation

-        // Perform the tightening pass
-        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
+
+          x4.lb = -2x0 -3x1 + 30 : [3, 19]
+          x4.ub = -2x0 -3x1 + 30 : [3, 19]

-        List<Tightening> expectedBounds2( {
-            Tightening( 2, 0.7, Tightening::LB ), Tightening( 2, 1.1, Tightening::UB ),
-            Tightening( 3, -0.8, Tightening::LB ), Tightening( 3, 0.9, Tightening::UB ),
-            Tightening( 4, -0.5, Tightening::LB ), Tightening( 4, 0.5, Tightening::UB ),
-            Tightening( 5, -0.6, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ),
+
+          x5.lb = x0 + x1 : [5, 11]
+          x5.ub = x0 + x1 : [5, 11]
-
-            Tightening( 6, -0.88, Tightening::LB ), Tightening( 6, 0.99,
Tightening::UB ), - Tightening( 7, -0.3, Tightening::LB ), Tightening( 7, 0.3, Tightening::UB ), + Layer 2: - Tightening( 8, 0.82, Tightening::LB ), Tightening( 8, 3.29, Tightening::UB ), - Tightening( 9, -1.29, Tightening::LB ), Tightening( 9, 1.18, Tightening::UB ), - - Tightening( 10, -4.2441, Tightening::LB ), Tightening( 10, 3.8822, Tightening::UB ), - - Tightening( 11, -3.8822, Tightening::LB ), Tightening( 11, 4.2441, Tightening::UB ), + x6.lb = - 3x0 - 4x1 + 30 : [-8, 14] + x6.ub = - 3x0 - 4x1 + 30 : [-8, 14] + */ + + List expectedBounds( { + Tightening( 2, -19, Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 19, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, 14, Tightening::UB ), } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - void test_interval_arithmetic_bound_propagation_abs_and_relu_constraints() + void test_sbt_absolute_values_positive_and_not_fixed() { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithAbsAndRelu( nlr ); + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; - double large = 1000; tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); @@ -5071,58 +6535,116 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - 
tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        nlr.setTableau( &tableau );
+        tableau.setLowerBound( 0, 4 );
+        tableau.setUpperBound( 0, 6 );
+        tableau.setLowerBound( 1, 1 );
+        tableau.setUpperBound( 1, 5 );

-        // Initialize
+        // Strong negative bias for x2, which is node (1,0)
+        nlr.setBias( 1, 0, -15 );
+
+        // Invoke SBT
        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );

-        // Perform the tightening pass
-        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
+        /*
+          Input ranges:
+
+          x0: [4, 6]
+          x1: [1, 5]
+
+          Layer 1:
+
+          x2.lb = 2x0 + 3x1 - 15   : [-4, 12]
+          x2.ub = 2x0 + 3x1 - 15   : [-4, 12]
+
+          x3.lb = x0 + x1   : [5, 11]
+          x3.ub = x0 + x1   : [5, 11]
+
+          First absolute value is undecided, bounds are concretized.
+          Second absolute value is active, bounds survive the activation
+
+          x4 range: [0, 12]
+          x4.lb = 0
+          x4.ub = 12
+
+          x5.lb = x0 + x1   : [5, 11]
+          x5.ub = x0 + x1   : [5, 11]
+
+          Layer 2:
+
+          x6.lb = - x0 - x1        : [-11, -5]
+          x6.ub = - x0 - x1 + 12   : [ 1, 7]
+
+          x6 range: [-11, 7]
+        */

        List<Tightening> expectedBounds( {
-            Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+            Tightening( 2, -4, Tightening::LB ),
+            Tightening( 2, 12, Tightening::UB ),
+            Tightening( 3, 5, Tightening::LB ),
+            Tightening( 3, 11, Tightening::UB ),
+
+            Tightening( 4, 0, Tightening::LB ),
+            Tightening( 4, 12, Tightening::UB ),
+            Tightening( 5, 5, Tightening::LB ),
+            Tightening( 5, 11, Tightening::UB ),
+
+            Tightening( 6, -11, Tightening::LB ),
+            Tightening( 6, 7, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+    }
+
+    void test_sbt_absolute_values_active_and_externally_fixed()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        tableau.getBoundManager().initialize( 7 );
+        nlr.setTableau( &tableau );
+
+        // Create the layers
+        nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
+        nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 );
+        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );

-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
+        // Mark layer dependencies
+        for ( unsigned i = 1; i <= 3; ++i )
+            nlr.addLayerDependency( i - 1, i );

-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
+        // Weights
+        nlr.setWeight( 0, 0, 1, 0, 2 );
+        nlr.setWeight( 0, 0, 1, 1, 1 );
+        nlr.setWeight( 0, 1, 1, 0, 3 );
+        nlr.setWeight( 0, 1, 1, 1, 1 );
+        nlr.setWeight( 2, 0, 3, 0, 1 );
+        nlr.setWeight( 2, 1, 3, 0, -1 );

-            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ),
+        // Mark the Abs sources
+        nlr.addActivationSource( 1, 0, 2, 0 );
+        nlr.addActivationSource( 1, 1, 2, 1 );

-            Tightening( 10, -5, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-            Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ),
+        // Variable indexing
nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), - } ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); @@ -5134,66 +6656,114 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - // Initialize + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
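+        // Since variable 2 is eliminated below, this bias ends up having no
+        // effect: the first Abs source becomes the constant -3, and x4 is
+        // fixed to |-3| = 3.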
+        nlr.setBias( 1, 0, -15 );
+
+        // However, the weighted sum variable has been eliminated
+        nlr.eliminateVariable( 2, -3 );
+
+        // Invoke SBT
        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );

-        // Perform the tightening pass
-        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
+        /*
+          Input ranges:

-        List<Tightening> expectedBounds2( {
-            Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+          x0: [4, 6]
+          x1: [1, 5]

-            Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ),
+          Layer 1:

-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
+          x2 is eliminated, everything set to -3

-            Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ),
+          x3.lb = x0 + x1   : [5, 11]
+          x3.ub = x0 + x1   : [5, 11]

-            Tightening( 10, -10, Tightening::LB ), Tightening( 10, 14, Tightening::UB ),
-            Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ),
+          Second absolute value is positive, bounds survive the activation

-            Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ),
-            Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ),
+          x4: all set to 3
+
+          x5.lb = x0 + x1   : [5, 11]
+          x5.ub = x0 + x1   : [5, 11]
+
+          Layer 2:
+
+          x6.lb = - x0 - x1 + 3   : [-8, -2]
+          x6.ub = - x0 - x1 + 3   : [-8, -2]
+        */
+
+        List<Tightening> expectedBounds( {
+            // x2 does not appear, because it has been eliminated
+
+            Tightening( 3, 5, Tightening::LB ),
+            Tightening( 3, 11, Tightening::UB ),
+
+            Tightening( 4, 3, Tightening::LB ),
+            Tightening( 4, 3, Tightening::UB ),
+            Tightening( 5, 5, Tightening::LB ),
+            Tightening( 5, 11, Tightening::UB ),
+
+            Tightening( 6, -8, Tightening::LB ),
+            Tightening( 6, -2, Tightening::UB ),
        } );
-
+
+        List<Tightening> bounds;
        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
    }

-    void test_interval_arithmetic_bound_propagation_round_and_sign_constraints()
+    void test_sbt_signs_positive_and_not_fixed()
    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
        NLR::NetworkLevelReasoner nlr;
-        populateNetworkWithRoundAndSign( nlr );
-
        MockTableau tableau;
-        tableau.getBoundManager().initialize( 14 );
+        tableau.getBoundManager().initialize( 7 );
+        nlr.setTableau( &tableau );

-        // Initialize the bounds
-        tableau.setLowerBound( 0, 1.4 );
-        tableau.setUpperBound( 0, 1.6 );
-        tableau.setLowerBound( 1, -1.4 );
-        tableau.setUpperBound( 1, 2.1 );
+        // Create the layers
+        nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
+        nlr.addLayer( 2, NLR::Layer::SIGN, 2 );
+        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );
+
+        // Mark layer dependencies
+        for ( unsigned i = 1; i <= 3; ++i )
+            nlr.addLayerDependency( i - 1, i );
+
+        // Weights
+        nlr.setWeight( 0, 0, 1, 0, 2 );
+        nlr.setWeight( 0, 0, 1, 1, 1 );
+        nlr.setWeight( 0, 1, 1, 0, 3 );
+        nlr.setWeight( 0, 1, 1, 1, 1 );
+        nlr.setWeight( 2, 0, 3, 0, 1 );
+        nlr.setWeight( 2, 1, 3, 0, -1 );
+
+        // Mark the Sign sources
+        nlr.addActivationSource( 1, 0, 2, 0 );
+        nlr.addActivationSource( 1, 1, 2, 1 );
+
+        // Variable indexing
nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; - double large = 1000; tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); @@ -5204,128 +6774,120 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - nlr.setTableau( &tableau ); + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); - // Initialize + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds( { - Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), - Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), - Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), - - Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + x0: [4, 6] + x1: [1, 5] - Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), - Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), - - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + Layer 1: - Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), - } ); + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] - // Change the current bounds - tableau.setLowerBound( 0, -3.1 ); - tableau.setUpperBound( 0, 1.6 ); - tableau.setLowerBound( 1, 1.4 ); - tableau.setUpperBound( 1, 2.1 ); + First sign is undecided, bounds are concretized. + Second sign is active, bounds become constant 1 + Coefficient (first Sign, lower): 2/12 = 1/6. + Coefficient (first Sign, upper): -2/-4 = 1/2. 
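+
+          (In general, for an undecided Sign over a source range [l, u] with
+          l < 0 < u, the relaxation used here is the line through ( 0, -1 )
+          and ( u, 1 ) for the lower bound, slope 2/u, and the line through
+          ( l, -1 ) and ( 0, 1 ) for the upper bound, slope -2/l, as the two
+          coefficients above illustrate.)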
- tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + x4 range: [-1, 1] + x4.lb = 1/6 ( 2x0 + 3x1 - 15 ) - 1 = 2/6 x0 + 3/6 x1 - 21/6 + x4.ub = 1/2 ( 2x0 + 3x1 - 15 ) + 1 = x0 + 1.5x1 - 6.5 - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + x5 range: [1, 1] + x5.lb = 1 + x5.ub = 1 + + Layer 2: - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + x6.lb = 1 ( 2/6 x0 + 3/6 x1 - 21/6 ) - 1 ( 1 ) = 2/6 x0 + 3/6 x1 - 27/6 : [-16/6, 0] + x6.ub = 1 ( x0 + 1.5x1 - 6.5 ) - 1 ( 1 ) = x0 + 1.5x1 - 7.5 : [-2, 6] - List expectedBounds2( { - Tightening( 2, -2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), - Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), - Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), - - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), - Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + x6 range: [-8/3, 6] + */ - Tightening( 8, -16, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), - Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), + Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2.6667, Tightening::LB ), + Tightening( 6, 6, Tightening::UB ), } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - - void test_interval_arithmetic_bound_propagation_leaky_relu_and_sigmoid_constraints() + + void test_sbt_signs_active_and_externally_fixed() { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; - populateNetworkWithLeakyReluAndSigmoid( nlr ); - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - 
tableau.setUpperBound( 0, 2 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; - double large = 1000; tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); @@ -5336,354 +6898,704 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - nlr.setTableau( &tableau ); + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); - // Initialize + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
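+        // Since variable 2 is eliminated below, the bias again has no
+        // effect: the first Sign source becomes the constant -3, so x4 is
+        // fixed to sign(-3) = -1.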
+ nlr.setBias( 1, 0, -15 ); + + // However, the weighted sum variable has been eliminated + nlr.eliminateVariable( 2, -3 ); + + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2 is eliminated, everything set to -3 + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First sign is negative, bounds become constant -1 + Second sign is positive, bounds become constant 1 + + x4: all set to -1 + + x5: all set to 1 + + Layer 2: + + x6.lb = 1 ( -1 ) - 1 ( 1 ) = -2 + x6.ub = 1 ( -1 ) - 1 ( 1 ) = -2 + */ List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), - Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), - Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + // x2 does not appear, because it has been eliminated - Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ), - Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), - - Tightening( 9, 0.0573, Tightening::LB ), Tightening( 9, 0.9999, Tightening::UB ), - Tightening( 11, 0.0219, Tightening::LB ), Tightening( 11, 0.9999, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), - Tightening( 12, 0.0573, Tightening::LB ), Tightening( 12, 0.9999, Tightening::UB ), - Tightening( 13, 0.1229, Tightening::LB ), Tightening( 13, 3.9996, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, -1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), + Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), + Tightening( 6, -2, Tightening::UB ), } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_leaky_relu() + { + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTLeakyReLU( nlr, tableau ); // alpha = 0.2 - // Change the current bounds - tableau.setLowerBound( 0, -3 ); + tableau.setLowerBound( 0, -1 ); tableau.setUpperBound( 0, 1 ); tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + 
tableau.setUpperBound( 1, 1 ); - // Initialize + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + x0: [-1, 1] + x1: [-1, 1] - Tightening( 3, -0.2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Layer 1: - Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 7.1, Tightening::UB ), - Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), + x2.lb = x0 + x1 : [-2, 2] + x2.ub = x0 + x1 : [-2, 2] + + x3.lb = x0 - x1 : [-2, 2] + x3.ub = x0 - x1 : [-2, 2] + + Both LeakyReLUs are undecided, bounds are concretized. + Coefficient: ( 2 - 0.2*-2 )/( 2--2 ) = 2.4/4 = 0.6 + Bias: ( 0.2 - 1 ) * 2 * -2 / ( 2--2 ) = 0.8 - Tightening( 9, 0.0323, Tightening::LB ), Tightening( 9, 0.9992, Tightening::UB ), - Tightening( 11, 0.0392, Tightening::LB ), Tightening( 11, 0.9993, Tightening::UB ), + x4.lb = x0 + x1 + x4.ub = 0.6 ( x0 + x1 ) + 0.8 = 0.6x0 + 0.6x1 + 0.8 + x4 range: [-0.4, 2] + + x5.lb = x0 - x1 + x5.ub = 0.6 ( x0 - x1 ) + 0.8 = 0.6x0 - 0.6x1 + 0.8 + x5 range: [-0.4, 2] + + Layer 2: - Tightening( 12, 0.0323, Tightening::LB ), Tightening( 12, 0.9992, Tightening::UB ), - Tightening( 13, 0.1498, Tightening::LB ), Tightening( 13, 3.9972, Tightening::UB ), - } ); + x6.lb = 1 ( x0 + x1 ) + 1 ( x0 - x1 ) = 2x0 : [-2, 2] + x6.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) + 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 1.2x0 + 1.6 : [0.4, 2.8] + x6 range: [-2, 2.8] + + x7.lb = 1 ( x0 + x1 ) - 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2] + x7.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) - 1 ( x0 - x1 ) = -0.4x0 + 1.6x1 + 0.8 : [-1.2, 2.8] + x7 range: [-2.8, 2.8] + + Both LeakyReLUs are undecided, bounds are concretized. 
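+
+          (For an undecided LeakyReLU with slope alpha over [l, u], the
+          concretized upper bound is the chord through ( l, alpha*l ) and
+          ( u, u ): coefficient ( u - alpha*l )/( u - l ) and bias
+          ( alpha - 1 ) * u * l / ( u - l ); the lower bound keeps y = x,
+          which is valid since alpha < 1.)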
+          Coefficient (first LeakyReLU): ( 2.8 - 0.2*-2 )/( 2.8--2 ) = 3.2/4.8 = 10/15
+          Bias (first LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2 / ( 2.8--2 ) = 14/15
+
+          Coefficient (second LeakyReLU): ( 2.8 - 0.2*-2.8 )/( 2.8--2.8 ) = 3.36/5.6 = 0.6
+          Bias (second LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2.8 / ( 2.8--2.8 ) = 1.12
+
+          x8.lb = 2x0
+          x8.ub = 10/15 ( 1.2x0 + 1.6 ) + 14/15 = 0.8x0 + 2
+          x8 range: [-0.4, 2.8]
+
+          x9.lb = 0.4x0 + 1.6x1 - 0.8
+          x9.ub = 0.6 ( -0.4x0 + 1.6x1 + 0.8 ) + 1.12 = -0.24 x0 + 0.96 x1 + 1.6
+          x9 range: [-0.56, 2.8]
+
+          Layer 3:
+
+          x10.lb = 1 ( 0.4x0 + 1.6x1 - 0.8 ) + 1 ( 2x0 ) + 1 = 2.4x0 + 1.6x1 + 0.2 : [-3.8, 4.2]
+          x10.ub = 1 ( -0.24 x0 + 0.96 x1 + 1.6 ) + 1 ( 0.8x0 + 2 ) + 1 = 0.56x0 + 0.96x1 + 4.6 : [3.08, 6.12]
+          x10 range: [-3.8, 6.12]
+
+          x11.lb = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2]
+          x11.ub = 0.6 ( -0.4x0 + 1.6x1 + 0.8 ) + 1.12 = -0.24 x0 + 0.96 x1 + 1.6 : [0.4, 2.8]
+          x11 range: [-2.8, 2.8]
+
+        */
+        List<Tightening> expectedBounds(
+            { Tightening( 2, -2, Tightening::LB ),   Tightening( 2, 2, Tightening::UB ),
+              Tightening( 3, -2, Tightening::LB ),   Tightening( 3, 2, Tightening::UB ),
+
+              Tightening( 4, -0.4, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
+              Tightening( 5, -0.4, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+
+              Tightening( 6, -2, Tightening::LB ),   Tightening( 6, 2.8, Tightening::UB ),
+              Tightening( 7, -2.8, Tightening::LB ), Tightening( 7, 2.8, Tightening::UB ),
+
+              Tightening( 8, -0.4, Tightening::LB ), Tightening( 8, 2.8, Tightening::UB ),
+              Tightening( 9, -0.56, Tightening::LB ), Tightening( 9, 2.8, Tightening::UB ),
+
+              Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 6.12, Tightening::UB ),
+              Tightening( 11, -2.8, Tightening::LB ), Tightening( 11, 2.8, Tightening::UB )
+
+            } );
+
+        List<Tightening> bounds;
        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
    }
-
-    void test_interval_arithmetic_bound_propagation_softmax_and_max_constraints()
+
+    void test_sbt_sigmoids_and_round()
    {
-        NLR::NetworkLevelReasoner nlr;
-        populateNetworkWithSoftmaxAndMax( nlr );
-
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
        NLR::NetworkLevelReasoner nlr;
        MockTableau tableau;
-        tableau.getBoundManager().initialize( 12 );
+        nlr.setTableau( &tableau );
+        populateNetworkSBTSigmoidsAndRound( nlr, tableau );

-        // Initialize the bounds
        tableau.setLowerBound( 0, -1 );
        tableau.setUpperBound( 0, 1 );
        tableau.setLowerBound( 1, -1 );
        tableau.setUpperBound( 1, 1 );

-        double large = 1000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
+        // Invoke SBT
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings(
bounds ) ); + + // Layer 1 + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 0 ), -2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 0 ), 2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 1 ), -2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 1 ), 2, 0.00001 ) ); + // Layer 2 + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 0 ), 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 0 ), 0.8807, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 1 ), 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 1 ), 0.8807, 0.0001 ) ); + + // Layer 3 + /* + Double-check with Python + --- + from math import exp as e + def g(x): + return 1 / (1 + e(-x)) + + def g_prime(x): + return g(x) * (1 - g(x)) + + def lam(l, u): + return (g(u) - g(l)) / (u - l) + + def lam_prime(l, u): + return min(g_prime(l), g_prime(u)) + + l3 = l4 = -2 + u3 = u4 = 2 + l5 = l6 = g(-2) + u5 = u6 = g(2) + lambda7 = lam(l3, u3) + lambda7_prime = lam_prime(l3, u3) + lambda8 = lam(l4, u4) + lambda8_prime = lam_prime(l4, u4) + x7_l = lambda7_prime * (-2) + g(-2) + g(-2) - lambda7_prime * (-2 + -2) + x7_u = lambda7_prime * (2) + g(2) + g(2) -lambda7_prime * (2 + 2) + x8_l = lambda8_prime * (-2) + g(-2) - g(2) - lambda8_prime * (-2 - 2) + x8_u = lambda8_prime * (2) + g(2) - g(-2) -lambda8_prime * (2 - -2) + print(x7_l) + print(x7_u) + print(x8_l) + print(x8_u) + --- + [output]: + 0.4483930148512481 + 1.5516069851487517 + -0.5516069851487517 + 0.5516069851487517 + */ + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 0 ), 0.4483, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 0 ), 1.5516, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 1 ), -0.5516, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 1 ), 0.5516, 0.0001 ) ); + + // Layer 4 + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 0 ), 0 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 0 ), 2 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 1 ), -1 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 1 ), 1 ); + } + + void test_sbt_max_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; nlr.setTableau( &tableau ); + populateNetworkSBTMax( nlr, tableau ); - // Initialize + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: + + x0: [-1, 1] + x1: [-1, 2] + + Layer 1: + + x2.lb = x0 + x1 : [-2, 3] + x2.ub = x0 + x1 : [-2, 3] + + x3.lb = x0 - x1 : [-3, 2] + x3.ub = x0 - x1 : [-3, 2] + + Both ReLUs are undecided, bounds are concretized. + Coefficient (first ReLU): 3/( 3--2 ) = 3/5 = 0.6 + Coefficient (second ReLU): 2/( 2--3 ) = 2/5 = 0.4 + + x4.lb = 0.6 ( x0 + x1 ) = 0.6x0 + 0.6x1 + x4.ub = 0.6 ( x0 + x1 ) + 1.2 = 0.6x0 + 0.6x1 + 1.2 + x4 range: [0, 3] + + x5.lb = 0.4 ( x0 - x1 ) = 0.4x0 + 0.4x1 + x5.ub = 0.4 ( x0 - x1 ) + 1.2 = 0.4x0 + 0.4x1 + 1.2 + x5 range: [0, 2] + + Max is not fixed because x5.lb <= x4.ub and x4.lb <= x5.ub + Max inherits lower bound from x4, and its upper bound is constant 3. 
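+          (Unfixed-Max rule, as exercised here: the symbolic lower bound is
+          inherited from a source attaining the largest concrete lower bound,
+          here x4, while the upper bound is concretized to the largest
+          concrete upper bound over all sources, max( 3, 2 ) = 3.)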
+
+          x6.lb = 0.6x0 + 0.6x1   : [-1.2, 1.8]
+          x6.ub = 3   : [3, 3]
+          x6 range: [-1.2, 3]
+
+          Layer 3:
+
+          x7.lb = 2 ( 0.6x0 + 0.6x1 ) = 1.2x0 + 1.2x1   : [-2.4, 3.6]
+          x7.ub = 2 ( 3 ) = 6   : [6, 6]
+          x7 range: [-2.4, 6]
+        */

        List<Tightening> expectedBounds( {
+            Tightening( 2, -2, Tightening::LB ),
+            Tightening( 2, 3, Tightening::UB ),
+            Tightening( 3, -3, Tightening::LB ),
+            Tightening( 3, 2, Tightening::UB ),
+            Tightening( 4, 0, Tightening::LB ),
+            Tightening( 4, 3, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ),
+            Tightening( 5, 2, Tightening::UB ),
+            Tightening( 6, 0, Tightening::LB ),
+            Tightening( 6, 3, Tightening::UB ),
+            Tightening( 7, -2.4, Tightening::LB ),
+            Tightening( 7, 6, Tightening::UB ),

        } );

        List<Tightening> bounds;
        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
    }

    void test_sbt_max_fixed()
    {
        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );

        NLR::NetworkLevelReasoner nlr;
        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTMax( nlr, tableau );

        tableau.setLowerBound( 0, 1 );
        tableau.setUpperBound( 0, 2 );
        tableau.setLowerBound( 1, -3 );
        tableau.setUpperBound( 1, -2 );

-        // Initialize
+        // Invoke SBT
        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );

-        // Perform the tightening pass
-        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
+        /*
+          Input ranges:

-        List<Tightening> expectedBounds2( {
-            Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-
-            Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3,
0.9526, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + x0: [1, 2] + x1: [-3, -2] + + Layer 1: + + x2.lb = x0 + x1 : [-2, 0] + x2.ub = x0 + x1 : [-2, 0] + + x3.lb = x0 - x1 : [3, 5] + x3.ub = x0 - x1 : [3, 5] + + First ReLU is negative, bounds become constant 0 + Second ReLU is positive, bounds survive the activation + + x4: all set to 0 + + x5.lb = x0 - x1 : [3, 5] + x5.ub = x0 - x1 : [3, 5] + + Max is fixed because x5.lb > x4.ub, it inherits x5's bounds + + x6.lb = x0 - x1 : [3, 5] + x6.ub = x0 - x1 : [3, 5] + + Layer 3: + + x7.lb = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10] + x7.ub = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10] + */ + + List expectedBounds( { + Tightening( 2, -2, Tightening::LB ), + Tightening( 2, 0, Tightening::UB ), + Tightening( 3, 3, Tightening::LB ), + Tightening( 3, 5, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 3, Tightening::LB ), + Tightening( 5, 5, Tightening::UB ), + Tightening( 6, 3, Tightening::LB ), + Tightening( 6, 5, Tightening::UB ), + Tightening( 7, 6, Tightening::LB ), + Tightening( 7, 10, Tightening::UB ), - Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), - Tightening( 9, 0.0654, Tightening::LB ), Tightening( 9, 2.9933, Tightening::UB ), - - Tightening( 10, 0.0654, Tightening::LB ), Tightening( 10, 2.9933, Tightening::UB ), - - Tightening( 11, -2.9933, Tightening::LB ), Tightening( 11, -0.0654, Tightening::UB ) } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - - void test_interval_arithmetic_bound_propagation_relu_and_bilinear_constraints() + + void test_sbt_softmax1() { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; - populateNetworkWithReluAndBilinear( nlr ); - MockTableau tableau; - tableau.getBoundManager().initialize( 12 ); + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); - // Initialize the bounds tableau.setLowerBound( 0, -1 ); tableau.setUpperBound( 0, 1 ); tableau.setLowerBound( 1, -1 ); tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + } + + void test_sbt_softmax2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + { + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + 
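+            // This block and its sibling below exercise the two softmax
+            // bound types, "lse" and "er" (presumably log-sum-exp and
+            // exponential-reciprocal, per the LSE*/ER* helpers in
+            // DeepPolySoftmaxElement); on a near-point input both are
+            // expected to concretize to the same bounds.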
NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.000001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.000001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.000001 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] + */ + List expectedBounds( { Tightening( 3, 2, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 0, Tightening::UB ), + Tightening( 6, 0.2595, Tightening::LB ), + Tightening( 6, 0.2595, Tightening::UB ), + Tightening( 7, 0.7054, Tightening::LB ), + Tightening( 7, 0.7054, Tightening::UB ), + Tightening( 8, 0.0351, Tightening::LB ), + Tightening( 8, 0.0351, Tightening::UB ), + Tightening( 9, 1, Tightening::LB ), + Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), + Tightening( 10, -1, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + { + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "er" ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.000001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.000001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.000001 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] + */ + List expectedBounds( { Tightening( 3, 2, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 0, Tightening::UB ), + Tightening( 6, 0.2595, Tightening::LB ), + Tightening( 6, 0.2595, Tightening::UB ), + Tightening( 7, 0.7054, Tightening::LB ), + Tightening( 7, 0.7054, Tightening::UB ), + Tightening( 8, 0.0351, Tightening::LB ), + Tightening( 8, 0.0351, Tightening::UB ), + Tightening( 9, 1, Tightening::LB ), + Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), + Tightening( 10, -1, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + } + void test_sbt_softmax3() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax2( nlr, tableau ); - // Initialize + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.00001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.00001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.00001 ); + + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( 
nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] + */ - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + List expectedBounds({ Tightening( 3, 2, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 0, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), + Tightening( 6, -1, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), + Tightening( 7, -2, Tightening::UB ), + Tightening( 8, 0.8668, Tightening::LB ), + Tightening( 8, 0.8668, Tightening::UB ), + Tightening( 9, 0.9820, Tightening::LB ), + Tightening( 9, 0.9820, Tightening::UB ), + Tightening( 10, 0.1173, Tightening::LB ), + Tightening( 10, 0.1173, Tightening::UB ), + Tightening( 11, 0.0179, Tightening::LB ), + Tightening( 11, 0.0179, Tightening::UB ), + Tightening( 12, 0.0159, Tightening::LB ), + Tightening( 12, 0.0159, Tightening::UB ), + Tightening( 13, 0.9470, Tightening::LB ), + Tightening( 13, 0.9470, Tightening::UB ), + Tightening( 14, -0.9470, Tightening::LB ), + Tightening( 14, -0.9470, Tightening::UB ), + Tightening( 15, 1.0253, Tightening::LB ), + Tightening( 15, 1.0253, Tightening::UB ), + Tightening( 16, -1.0253, Tightening::LB ), + Tightening( 16, -1.0253, Tightening::UB ) - Tightening( 10, -7, Tightening::LB ), Tightening( 10, 49, Tightening::UB ), - - Tightening( 11, -49, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + void test_softmax_bounds_er() + { + Vector inputLb = { -1, 0, 1 }; + Vector inputUb = { 0, 2, 4 }; + Vector input = { -0.5, 1, 2.5 }; + + double value = NLR::DeepPolySoftmaxElement::ERLowerBound( input, inputLb, inputUb, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 0.0114799, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dERLowerBound( input, inputLb, inputUb, 0, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 0.00563867, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dERLowerBound( input, inputLb, inputUb, 0, 1 ); + TS_ASSERT( FloatUtils::areEqual( value, -0.000838421, 0.00001 ) ); + + + Vector outputLb = { 0.2, 0, 0 }; + Vector outputUb = { 0.4, 0.1, 0.1 }; + + value = NLR::DeepPolySoftmaxElement::ERUpperBound( input, outputLb, outputUb, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, -1.44538, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dERUpperBound( input, outputLb, outputUb, 0, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 1.96538, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dERUpperBound( input, outputLb, outputUb, 0, 1 ); + TS_ASSERT( FloatUtils::areEqual( value, -0.358535, 0.00001 ) ); + 
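+
+        // A minimal added cross-check sketch. It assumes dERLowerBound(
+        // input, lb, ub, i, j ) is the partial derivative of ERLowerBound(
+        // input, lb, ub, i ) with respect to input j; under that assumption,
+        // a central finite difference should approximate the dERLowerBound
+        // value asserted above.
+        double eps = 0.000001;
+        Vector<double> inputPlus = input;
+        Vector<double> inputMinus = input;
+        inputPlus[0] += eps;
+        inputMinus[0] -= eps;
+        double finiteDifference =
+            ( NLR::DeepPolySoftmaxElement::ERLowerBound( inputPlus, inputLb, inputUb, 0 ) -
+              NLR::DeepPolySoftmaxElement::ERLowerBound( inputMinus, inputLb, inputUb, 0 ) ) /
+            ( 2 * eps );
+        TS_ASSERT( FloatUtils::areEqual(
+            finiteDifference,
+            NLR::DeepPolySoftmaxElement::dERLowerBound( input, inputLb, inputUb, 0, 0 ),
+            0.0001 ) );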
} - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); + void test_softmax_bounds_lse1() + { + Vector inputLb = { -1, 0, 1 }; + Vector inputUb = { 0, 2, 3 }; + Vector input = { -0.5, 1, 2 }; + double value = NLR::DeepPolySoftmaxElement::LSELowerBound( input, inputLb, inputUb, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) ); + value = NLR::DeepPolySoftmaxElement::dLSELowerBound( input, inputLb, inputUb, 0, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) ); + value = NLR::DeepPolySoftmaxElement::dLSELowerBound( input, inputLb, inputUb, 0, 1 ); + TS_ASSERT( FloatUtils::areEqual( value, -0.00703444, 0.001 ) ); + + Vector outputLb = { 0.2, 0, 0 }; + Vector outputUb = { 0.4, 0.1, 0.1 }; + value = NLR::DeepPolySoftmaxElement::LSEUpperBound( input, outputLb, outputUb, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, -0.164165, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dLSEUpperbound( input, outputLb, outputUb, 0, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 0.272204, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dLSEUpperbound( input, outputLb, outputUb, 0, 1 ); + TS_ASSERT( FloatUtils::areEqual( value, -0.073207, 0.00001 ) ); + } + + void test_sbt_bilinear() + { + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTBilinear( nlr, tableau ); - // Initialize + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -2 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + x0: [1, 2] + x1: [-2, 1] + + Layer 1: - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, -2, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + x2.lb = x0 - 2x1 : [-1, 6] + x2.ub = x0 - 2x1 : [-1, 6] - Tightening( 10, -14, Tightening::LB ), Tightening( 10, 49, Tightening::UB ), - - Tightening( 11, -49, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), - } ); + x3.lb = x0 + x1 : [-1, 3] + x3.ub = x0 + x1 : [-1, 3] - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + Coefficients for bilinear layer: + Lower bound: + alpha_l = x3.lb = -1 
+            beta = x2.lb = -1
+            gamma_l = -x2.lb * x3.lb = --1 * -1 = -1
+
+            Upper bound:
+            alpha_u = x3.ub = 3
+            beta = x2.lb = -1
+            gamma_u = -x2.lb * x3.ub = --1 * 3 = 3
+
+            x4.lb = -1 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + -1 = -2x0 + x1 - 1 : [-7, -2]
+            x4.ub = 3 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + 3 = 2x0 - 7x1 + 3 : [-2, 21]
+            x4 range: [-7, 21]
+
+            Layer 3:
+
+            x5.lb = -1 ( 2x0 - 7x1 + 3 ) = -2x0 + 7x1 - 3 : [-21, 2]
+            x5.ub = -1 ( -2x0 + x1 - 1 ) = 2x0 - x1 + 1 : [2, 7]
+            x5 range: [-21, 7]
+        */
+
+        List<Tightening> expectedBounds( { Tightening( 2, -1, Tightening::LB ),
+                                           Tightening( 2, 6, Tightening::UB ),
+                                           Tightening( 3, -1, Tightening::LB ),
+                                           Tightening( 3, 3, Tightening::UB ),
+                                           Tightening( 4, -7, Tightening::LB ),
+                                           Tightening( 4, 21, Tightening::UB ),
+                                           Tightening( 5, -21, Tightening::LB ),
+                                           Tightening( 5, 7, Tightening::UB ) } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
    }

    void test_concretize_input_assignment()

From 32d7c527b47cc05bed8702d37b3d1981a1e7773e Mon Sep 17 00:00:00 2001
From: ido-shm-uel
Date: Tue, 1 Oct 2024 23:20:47 +0300
Subject: [PATCH 10/81] 0-5-0: Adding rounding constant for LP relaxation
 propagation.

0-5-0: Adding rounding constant for LP relaxation propagation.
---
 src/configuration/GlobalConfiguration.cpp | 1 +
 src/configuration/GlobalConfiguration.h   | 3 ++-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/configuration/GlobalConfiguration.cpp b/src/configuration/GlobalConfiguration.cpp
index c70ce013ef..d8529ee7c1 100644
--- a/src/configuration/GlobalConfiguration.cpp
+++ b/src/configuration/GlobalConfiguration.cpp
@@ -71,6 +71,7 @@ const unsigned GlobalConfiguration::SIMULATION_RANDOM_SEED = 1;
 const bool GlobalConfiguration::USE_HARRIS_RATIO_TEST = true;

 const double GlobalConfiguration::SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT = 0.00000000001;
+const double GlobalConfiguration::LP_TIGHTENING_ROUNDING_CONSTANT = 0.00000001;

 const double GlobalConfiguration::SIGMOID_CUTOFF_CONSTANT = 20;

diff --git a/src/configuration/GlobalConfiguration.h b/src/configuration/GlobalConfiguration.h
index c89c1d54b3..ca06a28735 100644
--- a/src/configuration/GlobalConfiguration.h
+++ b/src/configuration/GlobalConfiguration.h
@@ -195,8 +195,9 @@ class GlobalConfiguration
       Symbolic bound tightening options
     */

-    // Symbolic tightening rounding constant
+    // Symbolic tightening, LP rounding constants
     static const double SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT;
+    static const double LP_TIGHTENING_ROUNDING_CONSTANT;

     static const double SIGMOID_CUTOFF_CONSTANT;

From 5a7c5d8d63756bd8dd55b5ab57642448043bee49 Mon Sep 17 00:00:00 2001
From: ido-shm-uel
Date: Tue, 1 Oct 2024 23:24:17 +0300
Subject: [PATCH 11/81] 0-5-1: Implementing forward-backward LP propagation
 algorithm for all activation functions.

0-5-1: Implementing forward-backward LP propagation algorithm for all
activation functions.
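Bounds returned by Gurobi are widened by LP_TIGHTENING_ROUNDING_CONSTANT
(upper bounds up, lower bounds down) so that small floating-point errors in
the LP optima should not make the tightened bounds unsound.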
---
 src/nlr/LPFormulator.cpp | 550 ++++++++++++++++++++++++++++++++++++++-
 src/nlr/LPFormulator.h   |  17 +-
 2 files changed, 560 insertions(+), 7 deletions(-)

diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp
index 552fa64617..9341c7188e 100644
--- a/src/nlr/LPFormulator.cpp
+++ b/src/nlr/LPFormulator.cpp
@@ -21,6 +21,7 @@
 #include "MStringf.h"
 #include "NLRError.h"
 #include "Options.h"
+#include "DeepPolySoftmaxElement.h"
 #include "TimeUtils.h"
 #include "Vector.h"

@@ -322,7 +323,14 @@ void LPFormulator::optimizeBoundsWithLpRelaxation( const Map<unsigned, Layer *>
                        TimeUtils::timePassed( gurobiStart, gurobiEnd ) / 1000000 )
                .ascii() );

+    // Clean up
     clearSolverQueue( freeSolvers );
+
+    if ( threads )
+    {
+        delete[] threads;
+        threads = NULL;
+    }

     if ( infeasible )
         throw InfeasibleQueryException();
@@ -492,7 +500,16 @@ void LPFormulator::optimizeBoundsOfNeuronsWithLpRlaxation( ThreadArgument &args,
             threads[i].interrupt();
             threads[i].join();
         }
+
+        // Clean up
         clearSolverQueue( freeSolvers );
+
+        if ( threads )
+        {
+            delete[] threads;
+            threads = NULL;
+        }
+
         throw InfeasibleQueryException();
     }

@@ -569,7 +586,8 @@ void LPFormulator::tightenSingleVariableBoundsWithLPRelaxation( ThreadArgument &
    {
        LPFormulator_LOG( Stringf( "Computing upperbound..." ).ascii() );
        double ub = optimizeWithGurobi(
-            *gurobi, MinOrMax::MAX, variableName, cutoffValue, &infeasible );
+                        *gurobi, MinOrMax::MAX, variableName, cutoffValue, &infeasible ) +
+                    GlobalConfiguration::LP_TIGHTENING_ROUNDING_CONSTANT;
        LPFormulator_LOG( Stringf( "Upperbound computed %f", ub ).ascii() );

        // Store the new bound if it is tighter

        LPFormulator_LOG( Stringf( "Computing lowerbound..." ).ascii() );
        gurobi->reset();
        double lb = optimizeWithGurobi(
-            *gurobi, MinOrMax::MIN, variableName, cutoffValue, &infeasible );
+                        *gurobi, MinOrMax::MIN, variableName, cutoffValue, &infeasible ) -
+                    GlobalConfiguration::LP_TIGHTENING_ROUNDING_CONSTANT;
        LPFormulator_LOG( Stringf( "Lowerbound computed: %f", lb ).ascii() );
        // Store the new bound if it is tighter
        if ( lb > currentLb )

@@ -670,7 +689,6 @@ void LPFormulator::createLPRelaxationAfter( const Map<unsigned, Layer *> &layers
    }
}

-
void LPFormulator::addLayerToModel( GurobiWrapper &gurobi,
                                    const Layer *layer,
                                    bool createVariables )
@@ -688,10 +706,18 @@
    case Layer::WEIGHTED_SUM:
        addWeightedSumLayerToLpRelaxation( gurobi, layer, createVariables );
        break;
+
+    case Layer::ROUND:
+        addRoundLayerToLpRelaxation( gurobi, layer, createVariables );
+        break;

    case Layer::LEAKY_RELU:
        addLeakyReluLayerToLpRelaxation( gurobi, layer, createVariables );
        break;
+
+    case Layer::ABSOLUTE_VALUE:
+        addAbsoluteValueLayerToLpRelaxation( gurobi, layer, createVariables );
+        break;

    case Layer::SIGN:
        addSignLayerToLpRelaxation( gurobi, layer, createVariables );
        break;

    case Layer::MAX:
        addMaxLayerToLpRelaxation( gurobi, layer, createVariables );
        break;
+
+    case Layer::SIGMOID:
+        addSigmoidLayerToLpRelaxation( gurobi, layer, createVariables );
+        break;
+
+    case Layer::SOFTMAX:
+        addSoftmaxLayerToLpRelaxation( gurobi, layer, createVariables );
+        break;
+
+    case Layer::BILINEAR:
+        addBilinearLayerToLpRelaxation( gurobi, layer, createVariables );
+        break;

    default:
        throw NLRError( NLRError::LAYER_TYPE_NOT_SUPPORTED, "LPFormulator" );
@@ -807,6 +845,244 @@ void LPFormulator::addReluLayerToLpRelaxation( GurobiWrapper &gurobi,
        }
    }
}

+
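+/*
+  Round layers: for a source x in [l, u], the target y = round( x ) satisfies
+  the linear envelope x - 0.5 <= y <= x + 0.5, which is the only constraint
+  the relaxation below adds when l < u; when l == u the target is pinned to
+  its rounded value.
+*/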
+void LPFormulator::addRoundLayerToLpRelaxation( GurobiWrapper &gurobi,
+                                                const Layer *layer,
+                                                bool createVariables )
+{
+    for ( unsigned i = 0; i < layer->getSize(); ++i )
+    {
+        if ( !layer->neuronEliminated( i ) )
+        {
+            unsigned targetVariable = layer->neuronToVariable( i );
+
+            List<NLR::NeuronIndex> sources = layer->getActivationSources( i );
+            const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer );
+            unsigned sourceNeuron = sources.begin()->_neuron;
+
+            if ( sourceLayer->neuronEliminated( sourceNeuron ) )
+            {
+                // If the source neuron has been eliminated, this neuron is constant
+                double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron );
+                double targetValue = FloatUtils::round( sourceValue );
+
+                gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue );
+
+                continue;
+            }
+
+            unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron );
+            double sourceLb = sourceLayer->getLb( sourceNeuron );
+            double sourceUb = sourceLayer->getUb( sourceNeuron );
+            String sourceName = Stringf( "x%u", sourceVariable );
+            if ( createVariables && !gurobi.containsVariable( sourceName ) )
+                gurobi.addVariable( sourceName, sourceLb, sourceUb );
+
+            double ub = std::max( FloatUtils::round( sourceUb ), layer->getUb( i ) );
+            double lb = std::min( FloatUtils::round( sourceLb ), layer->getLb( i ) );
+
+            gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub );
+
+            // If u = l: y = round( u )
+            if ( FloatUtils::areEqual( sourceUb, sourceLb ) )
+            {
+                List<GurobiWrapper::Term> terms;
+                terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                gurobi.addEqConstraint( terms, FloatUtils::round( sourceUb ) );
+            }
+            else
+            {
+                List<GurobiWrapper::Term> terms;
+
+                // y <= x + 0.5, i.e. y - x <= 0.5
+                terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) );
+                gurobi.addLeqConstraint( terms, 0.5 );
+
+                // y >= x - 0.5, i.e. y - x >= -0.5
+                terms.clear();
+                terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) );
+                gurobi.addGeqConstraint( terms, -0.5 );
+            }
+        }
+    }
+}
+
+
+void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi,
+                                                        const Layer *layer,
+                                                        bool createVariables )
+{
+    for ( unsigned i = 0; i < layer->getSize(); ++i )
+    {
+        if ( !layer->neuronEliminated( i ) )
+        {
+            unsigned targetVariable = layer->neuronToVariable( i );
+
+            List<NLR::NeuronIndex> sources = layer->getActivationSources( i );
+            const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer );
+            unsigned sourceNeuron = sources.begin()->_neuron;
+
+            if ( sourceLayer->neuronEliminated( sourceNeuron ) )
+            {
+                // If the source neuron has been eliminated, this neuron is constant
+                double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron );
+                double targetValue = sourceValue > 0 ? sourceValue : -sourceValue;
+
+                gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue );
+
+                continue;
+            }
+
+            unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron );
+            double sourceLb = sourceLayer->getLb( sourceNeuron );
+            double sourceUb = sourceLayer->getUb( sourceNeuron );
+            String sourceName = Stringf( "x%u", sourceVariable );
+            if ( createVariables && !gurobi.containsVariable( sourceName ) )
+                gurobi.addVariable( sourceName, sourceLb, sourceUb );
+
+            double ub = std::min( FloatUtils::max( -sourceLb, sourceUb ), layer->getUb( i ) );
+
+            gurobi.addVariable( Stringf( "x%u", targetVariable ), 0, ub );
+
+            if ( !FloatUtils::isNegative( sourceLb ) )
+            {
+                // The AbsoluteValue is active, y = x, i.e. y - x = 0
+                List<GurobiWrapper::Term> terms;
+                terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) );
+                gurobi.addEqConstraint( terms, 0 );
+            }
+            else if ( !FloatUtils::isPositive( sourceUb ) )
+            {
+                // The AbsoluteValue is inactive, y = -x, i.e. y + x = 0
+                List<GurobiWrapper::Term> terms;
+                terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", sourceVariable ) ) );
+                gurobi.addEqConstraint( terms, 0 );
+            }
+            else
+            {
+                /*
+                  The phase of this AbsoluteValue is not yet fixed, 0 <= y <= max( -lb, ub ).
+                */
+
+                // y >= 0
+                List<GurobiWrapper::Term> terms;
+                terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                gurobi.addGeqConstraint( terms, 0 );
+
+                // y <= max( -lb, ub )
+                terms.clear();
+                terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                gurobi.addLeqConstraint( terms, ub );
+            }
+        }
+    }
+}
+
+
+void LPFormulator::addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi,
+                                                  const Layer *layer,
+                                                  bool createVariables )
+{
+    for ( unsigned i = 0; i < layer->getSize(); ++i )
+    {
+        if ( !layer->neuronEliminated( i ) )
+        {
+            unsigned targetVariable = layer->neuronToVariable( i );
+
+            List<NLR::NeuronIndex> sources = layer->getActivationSources( i );
+            const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer );
+            unsigned sourceNeuron = sources.begin()->_neuron;
+
+            if ( sourceLayer->neuronEliminated( sourceNeuron ) )
+            {
+                // If the source neuron has been eliminated, this neuron is constant
+                double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron );
+                double targetValue = SigmoidConstraint::sigmoid( sourceValue );
+
+                gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue );
+
+                continue;
+            }
+
+            unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron );
+            double sourceLb = sourceLayer->getLb( sourceNeuron );
+            double sourceUb = sourceLayer->getUb( sourceNeuron );
+            String sourceName = Stringf( "x%u", sourceVariable );
+            if ( createVariables && !gurobi.containsVariable( sourceName ) )
+                gurobi.addVariable( sourceName, sourceLb, sourceUb );
+
+            double ub = std::min( SigmoidConstraint::sigmoid( sourceUb ), layer->getUb( i ) );
+            double lb = std::max( SigmoidConstraint::sigmoid( sourceLb ), layer->getLb( i ) );
+
+            gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub );
+
+            // If u = l: y = sigmoid( u )
+            if ( FloatUtils::areEqual( sourceUb, sourceLb ) )
+            {
+                List<GurobiWrapper::Term> terms;
+                terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                gurobi.addEqConstraint( terms, SigmoidConstraint::sigmoid( sourceUb ) );
+            }
+            else
+            {
+                List<GurobiWrapper::Term> terms;
+                double lambda = ( ub - lb ) / ( sourceUb - sourceLb );
+                double lambdaPrime = std::min( SigmoidConstraint::sigmoidDerivative( sourceLb ),
+                                               SigmoidConstraint::sigmoidDerivative( sourceUb ) );
+
+                // Update the lower bound. Both candidate lines pass through ( l, sigmoid(l) ).
+                if ( FloatUtils::isPositive( sourceLb ) )
+                {
+                    // Sigmoid is concave on [l, u], use the chord:
+                    // y >= sigmoid(l) + lambda * (x - l), i.e. y - lambda * x >= sigmoid(l) - lambda * l
+                    terms.clear();
+                    terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                    terms.append( GurobiWrapper::Term( -lambda, Stringf( "x%u", sourceVariable ) ) );
+                    gurobi.addGeqConstraint( terms, lb - lambda * sourceLb );
+                }
+                else
+                {
+                    // y >= sigmoid(l) + lambda' * (x - l), i.e. y - lambda' * x >= sigmoid(l) - lambda' * l
+                    terms.clear();
+                    terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                    terms.append(
+                        GurobiWrapper::Term( -lambdaPrime, Stringf( "x%u", sourceVariable ) ) );
+                    gurobi.addGeqConstraint( terms, lb - lambdaPrime * sourceLb );
+                }
+
+                // Update the upper bound. Both candidate lines pass through ( u, sigmoid(u) ).
+                if ( !FloatUtils::isPositive( sourceUb ) )
+                {
+                    // Sigmoid is convex on [l, u], use the chord:
+                    // y <= sigmoid(u) + lambda * (x - u), i.e. y - lambda * x <= sigmoid(u) - lambda * u
+                    terms.clear();
+                    terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                    terms.append( GurobiWrapper::Term( -lambda, Stringf( "x%u", sourceVariable ) ) );
+                    gurobi.addLeqConstraint( terms, ub - lambda * sourceUb );
+                }
+                else
+                {
+                    // y <= sigmoid(u) + lambda' * (x - u), i.e. y - lambda' * x <= sigmoid(u) - lambda' * u
+                    terms.clear();
+                    terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                    terms.append(
+                        GurobiWrapper::Term( -lambdaPrime, Stringf( "x%u", sourceVariable ) ) );
+                    gurobi.addLeqConstraint( terms, ub - lambdaPrime * sourceUb );
+                }
+            }
+        }
+    }
+}
+
+
 void LPFormulator::addSignLayerToLpRelaxation( GurobiWrapper &gurobi,
                                                const Layer *layer,
                                                bool createVariables )
@@ -890,6 +1166,7 @@ void LPFormulator::addSignLayerToLpRelaxation( GurobiWrapper &gurobi,
     }
 }
 
+
 void LPFormulator::addMaxLayerToLpRelaxation( GurobiWrapper &gurobi,
                                               const Layer *layer,
                                               bool createVariables )
@@ -971,6 +1248,267 @@ void LPFormulator::addMaxLayerToLpRelaxation( GurobiWrapper &gurobi,
     }
 }
 
+
+void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi,
+                                                  const Layer *layer,
+                                                  bool createVariables )
+{
+    // Shared across the layer's neurons, so that the position of each output
+    // neuron within its softmax group can be recovered below
+    Set<unsigned> handledInputNeurons;
+    for ( unsigned i = 0; i < layer->getSize(); ++i )
+    {
+        if ( layer->neuronEliminated( i ) )
+            continue;
+
+        List<NLR::NeuronIndex> sources = layer->getActivationSources( i );
+
+        Vector<double> sourceLbs;
+        Vector<double> sourceUbs;
+        Vector<double> sourceMids;
+        Vector<double> targetLbs;
+        Vector<double> targetUbs;
+        for ( const auto &source : sources )
+        {
+            const Layer *sourceLayer = _layerOwner->getLayer( source._layer );
+            unsigned sourceNeuron = source._neuron;
+            unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron );
+            double sourceLb = sourceLayer->getLb( sourceNeuron );
+            double sourceUb = sourceLayer->getUb( sourceNeuron );
+            String sourceName = Stringf( "x%u", sourceVariable );
+            if ( createVariables && !gurobi.containsVariable( sourceName ) )
+                gurobi.addVariable( sourceName, sourceLb, sourceUb );
+
+            sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS );
+            sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS );
+            sourceMids.append( ( sourceLb + sourceUb ) / 2 );
+            targetLbs.append( layer->getLb( i ) );
+            targetUbs.append( layer->getUb( i ) );
+        }
+
+        // Find the index of neuron i within its softmax group
+        unsigned index = 0;
+        for ( const auto &source : sources )
+        {
+            if ( handledInputNeurons.exists( source._neuron ) )
+                ++index;
+            else
+            {
+                handledInputNeurons.insert( source._neuron );
+                break;
+            }
+        }
+
+        double ub = std::min( DeepPolySoftmaxElement::linearUpperBound( sourceLbs, sourceUbs, index ),
+                              layer->getUb( i ) );
+        double lb = std::max( DeepPolySoftmaxElement::linearLowerBound( sourceLbs, sourceUbs, index ),
+                              layer->getLb( i ) );
+
+        unsigned targetVariable = layer->neuronToVariable( i );
+        gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub );
+
+        double bias;
+        SoftmaxBoundType boundType = Options::get()->getSoftmaxBoundType();
+
+        List<GurobiWrapper::Term> terms;
+        if ( FloatUtils::areEqual( lb, ub ) )
+        {
+            terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+            gurobi.addEqConstraint( terms, ub );
+        }
+        else
+        {
+            // Compute symbolic bounds
+            if ( boundType == SoftmaxBoundType::LOG_SUM_EXP_DECOMPOSITION )
+            {
+                bool useLSE2 = false;
+                for ( const auto &targetLb : targetLbs )
+                {
+                    if ( targetLb > GlobalConfiguration::SOFTMAX_LSE2_THRESHOLD )
+                        useLSE2 = true;
+                }
+                unsigned inputIndex = 0;
+                if ( !useLSE2 )
+                {
+                    terms.clear();
+                    terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                    bias = DeepPolySoftmaxElement::LSELowerBound( sourceMids, sourceLbs, sourceUbs, index );
+                    for ( const auto &source : sources )
+                    {
+                        const Layer *sourceLayer = _layerOwner->getLayer( source._layer );
+                        unsigned sourceNeuron = source._neuron;
+                        unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron );
+                        double dldj =
+                            DeepPolySoftmaxElement::dLSELowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex );
+                        terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) );
+                        bias -= dldj * sourceMids[inputIndex];
+                        ++inputIndex;
+                    }
+                    gurobi.addGeqConstraint( terms, bias );
+                }
+                else
+                {
+                    terms.clear();
+                    terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                    bias = DeepPolySoftmaxElement::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, index );
+                    for ( const auto &source : sources )
+                    {
+                        const Layer *sourceLayer = _layerOwner->getLayer( source._layer );
+                        unsigned sourceNeuron = source._neuron;
+                        unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron );
+                        double dldj =
+                            DeepPolySoftmaxElement::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index, inputIndex );
+                        terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) );
+                        bias -= dldj * sourceMids[inputIndex];
+                        ++inputIndex;
+                    }
+                    gurobi.addGeqConstraint( terms, bias );
+                }
+
+                terms.clear();
+                terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                bias = DeepPolySoftmaxElement::LSEUpperBound( sourceMids, targetLbs, targetUbs, index );
+                inputIndex = 0;
+                for ( const auto &source : sources )
+                {
+                    const Layer *sourceLayer = _layerOwner->getLayer( source._layer );
+                    unsigned sourceNeuron = source._neuron;
+                    unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron );
+                    double dudj =
+                        DeepPolySoftmaxElement::dLSEUpperbound( sourceMids, targetLbs, targetUbs, index, inputIndex );
+                    terms.append( GurobiWrapper::Term( -dudj, Stringf( "x%u", sourceVariable ) ) );
+                    bias -= dudj * sourceMids[inputIndex];
+                    ++inputIndex;
+                }
+                gurobi.addLeqConstraint( terms, bias );
+            }
+            else if ( boundType == SoftmaxBoundType::EXPONENTIAL_RECIPROCAL_DECOMPOSITION )
+            {
+                terms.clear();
+                terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                bias = DeepPolySoftmaxElement::ERLowerBound( sourceMids, sourceLbs, sourceUbs, index );
+                unsigned inputIndex = 0;
+                for ( const auto &source : sources )
+                {
+                    const Layer *sourceLayer = _layerOwner->getLayer( source._layer );
+                    unsigned sourceNeuron = source._neuron;
+                    unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron );
+                    double dldj =
+                        DeepPolySoftmaxElement::dERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex );
+                    terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) );
+                    bias -= dldj * sourceMids[inputIndex];
+                    ++inputIndex;
+                }
+                gurobi.addGeqConstraint( terms, bias );
+
+                terms.clear();
+                terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                bias = DeepPolySoftmaxElement::ERUpperBound( sourceMids, targetLbs, targetUbs, index );
+                inputIndex = 0;
+                for ( const auto &source : sources )
+                {
+                    const Layer *sourceLayer = _layerOwner->getLayer( source._layer );
+                    unsigned sourceNeuron = source._neuron;
+                    unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron );
+                    double dudj =
+                        DeepPolySoftmaxElement::dERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex );
+                    terms.append( GurobiWrapper::Term( -dudj, Stringf( "x%u", sourceVariable ) ) );
+                    bias -= dudj * sourceMids[inputIndex];
+                    ++inputIndex;
+                }
+                gurobi.addLeqConstraint( terms, bias );
+            }
+        }
+    }
+}
+
+
+void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi,
+                                                   const Layer *layer,
+                                                   bool createVariables )
+{
+    for ( unsigned i = 0; i < layer->getSize(); ++i )
+    {
+        if ( !layer->neuronEliminated( i ) )
+        {
+            unsigned targetVariable = layer->neuronToVariable( i );
+
+            List<NLR::NeuronIndex> sources = layer->getActivationSources( i );
+
+            // Note: assumes both source neurons belong to the same layer, and
+            // that either both or neither of them have been eliminated
+            const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer );
+
+            Vector<double> sourceLbs;
+            Vector<double> sourceUbs;
+            Vector<double> sourceValues;
+            Vector<unsigned> sourceNeurons;
+            bool allConstant = true;
+            for ( const auto &sourceIndex : sources )
+            {
+                unsigned sourceNeuron = sourceIndex._neuron;
+                double sourceLb = sourceLayer->getLb( sourceNeuron );
+                double sourceUb = sourceLayer->getUb( sourceNeuron );
+                String sourceName = Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeuron ) );
+
+                sourceNeurons.append( sourceNeuron );
+                sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS );
+                sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS );
+
+                if ( createVariables && !gurobi.containsVariable( sourceName ) )
+                    gurobi.addVariable( sourceName, sourceLb, sourceUb );
+
+                if ( !sourceLayer->neuronEliminated( sourceNeuron ) )
+                {
+                    allConstant = false;
+                }
+                else
+                {
+                    double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron );
+                    sourceValues.append( sourceValue );
+                }
+            }
+
+            if ( allConstant )
+            {
+                // If both source neurons have been eliminated, this neuron is constant
+                double targetValue = sourceValues[0] * sourceValues[1];
+                gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue );
+                continue;
+            }
+
+            double lb = FloatUtils::infinity();
+            double ub = FloatUtils::negativeInfinity();
+            List<double> values = { sourceLbs[0] * sourceLbs[1],
+                                    sourceLbs[0] * sourceUbs[1],
+                                    sourceUbs[0] * sourceLbs[1],
+                                    sourceUbs[0] * sourceUbs[1] };
+            for ( const auto &v : values )
+            {
+                if ( v < lb )
+                    lb = v;
+                if ( v > ub )
+                    ub = v;
+            }
+
+            gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub );
+
+            // Lower bound (McCormick): out >= l_y * x + l_x * y - l_x * l_y
+            List<GurobiWrapper::Term> terms;
+            terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+            terms.append( GurobiWrapper::Term( -sourceLbs[1], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) );
+            terms.append( GurobiWrapper::Term( -sourceLbs[0], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) );
+            gurobi.addGeqConstraint( terms, -sourceLbs[0] * sourceLbs[1] );
+
+            // Upper bound (McCormick): out <= u_y * x + l_x * y - l_x * u_y
+            terms.clear();
+            terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+            terms.append( GurobiWrapper::Term( -sourceUbs[1], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) );
+            terms.append( GurobiWrapper::Term( -sourceLbs[0], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) );
+            gurobi.addLeqConstraint( terms, -sourceLbs[0] * sourceUbs[1] );
+        }
+    }
+}
+
+
 void LPFormulator::addWeightedSumLayerToLpRelaxation( GurobiWrapper &gurobi,
                                                       const Layer *layer,
                                                       bool createVariables )
@@ -1074,7 +1612,7 @@ void LPFormulator::addLeakyReluLayerToLpRelaxation( GurobiWrapper &gurobi,
 
         if ( !FloatUtils::isNegative( sourceLb ) )
         {
-            // The ReLU is active, y = x
+            // The LeakyReLU is active, y = x
 
             List<GurobiWrapper::Term> terms;
             terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
@@ -1083,7 +1621,7 @@ void LPFormulator::addLeakyReluLayerToLpRelaxation( GurobiWrapper &gurobi,
         }
         else if ( !FloatUtils::isPositive( sourceUb ) )
        {
-            // The ReLU is inactive, y = alpha * x
+            // The LeakyReLU is inactive, y = alpha * x
             List<GurobiWrapper::Term> terms;
             terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
             terms.append( GurobiWrapper::Term( -slope, Stringf( "x%u", sourceVariable ) ) );
@@ -1096,7 +1634,7 @@ void LPFormulator::addLeakyReluLayerToLpRelaxation( GurobiWrapper &gurobi,
             double bias = ( ( slope - 1 ) * sourceUb * sourceLb ) / width;
 
             /*
-              The phase of this ReLU is not yet fixed.
+              The phase of this LeakyReLU is not yet fixed.
               For y = LeakyReLU(x), we add the following triangular relaxation:
               1. y >= alpha * x
               2. y >= x
diff --git a/src/nlr/LPFormulator.h b/src/nlr/LPFormulator.h
index 9a12a9e617..135854bf6b 100644
--- a/src/nlr/LPFormulator.h
+++ b/src/nlr/LPFormulator.h
@@ -103,7 +103,22 @@ class LPFormulator : public ParallelSolver
     void addMaxLayerToLpRelaxation( GurobiWrapper &gurobi,
                                     const Layer *layer,
                                     bool createVariables );
-
+
+    void
+    addRoundLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables );
+
+    void
+    addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables );
+
+    void
+    addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables );
+
+    void
+    addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables );
+
+    void
+    addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables );
+
     void addWeightedSumLayerToLpRelaxation( GurobiWrapper &gurobi,
                                             const Layer *layer,
                                             bool createVariables );

From 79f7a845e05ba789bf854deca01069422fe42bd4 Mon Sep 17 00:00:00 2001
From: ido-shm-uel
Date: Tue, 1 Oct 2024 23:25:47 +0300
Subject: [PATCH 12/81] 0-5-2: Adding forward-backward LP propagation tests
 for all activation functions.

0-5-2: Adding forward-backward LP propagation tests for all activation
functions.
---
 src/nlr/tests/Test_NetworkLevelReasoner.h | 13076 +++++++++++++-------
 1 file changed, 8774 insertions(+), 4302 deletions(-)

diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h
index 42de1e97fa..bc4d620609 100644
--- a/src/nlr/tests/Test_NetworkLevelReasoner.h
+++ b/src/nlr/tests/Test_NetworkLevelReasoner.h
@@ -2,7 +2,7 @@
 /*!
\file Test_NetworkLevelReasoner.h ** \verbatim ** Top contributors (to current version): - ** Guy Katz, Andrew Wu, Ido Shmuel + ** Guy Katz, Andrew Wu ** This file is part of the Marabou project. ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS ** in the top-level source directory) and their institutional affiliations. @@ -1932,892 +1932,1964 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 5, -large ); tableau.setUpperBound( 5, large ); } - - void test_evaluate_relu() + + void populateNetworkBackwardReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; + /* - populateNetwork( nlr ); + 1 R -1 R -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ R / \ R / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 - double input[2]; - double output[2]; + The example described in Fig. 2 of + https://dl.acm.org/doi/10.1145/3563325 + using ReLU activation instead of LeakyReLU + */ - // With ReLUs, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + nlr.getLayer( 4 )->setAlpha( 0.1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); - // With ReLUs, case 1 - input[0] = 1; - input[1] = 1; + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 1 ) ); + nlr.setBias( 5, 1, 2 ); - // With ReLUs, case 2 - input[0] = 1; - input[1] = 2; + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 0 ) ); - } + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - void test_evaluate_sigmoids() - { - NLR::NetworkLevelReasoner nlr; + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - populateNetworkWithSigmoids( nlr ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - double input[2]; - double output[2]; + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 
7 ); - // case 1 - input[0] = 0; - input[1] = 0; + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.6750, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 3.0167, 0.0001 ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; - // case 2 - input[0] = 1; - input[1] = 1; + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardReLU2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = ReLU( x3 ) + x8 = ReLU( x4 ) + x9 = ReLU( x5 ) + x10 = ReLU( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = ReLU( x11 ) + x15 = ReLU( x12 ) + x16 = ReLU( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = ReLU( x17 ) + x20 = ReLU( x18 ) + + x21 = x19 - x20 - 1 + */ - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::RELU, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::RELU, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.6032, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.5790, 0.0001 ) ); + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); - // case 3 - input[0] = 1; - input[1] = 2; + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); - TS_ASSERT( FloatUtils::areEqual( 
output[0], 0.5045, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.1957, 0.0001 ) ); - } - - void test_evaluate_abs() - { - NLR::NetworkLevelReasoner nlr; + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); - populateNetworkWithAbs( nlr ); + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); - double input[2]; - double output[2]; + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); - // With Abs, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - // With Abs, case 1 - input[0] = -2; - input[1] = -2; + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - // With Abs, case 2 - input[0] = 1; - input[1] = 2; + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; - TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 10 ) ); + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + 
tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); } - void test_evaluate_sign() + void populateNetworkBackwardSigmoid( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; + /* - populateNetworkWithSign( nlr ); + 1 S -1 S -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ S / \ S / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 - double input[2]; - double output[2]; + The example described in Fig. 2 of + https://dl.acm.org/doi/10.1145/3563325 + using Sigmoid activation instead of LeakyReLU + */ - // With Sign, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); - // With Sign, case 1 - input[0] = -2; - input[1] = -2; + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + nlr.setBias( 5, 1, 2 ); - // With Sign, case 2 (0 considered "non-negative", sign(0)=1) - input[0] = -1; - input[1] = 1; + // Mark the Sigmoid sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], -1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -4 ) ); - } - - void test_evaluate_round() - { - NLR::NetworkLevelReasoner nlr; + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 
0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - populateNetworkWithRound( nlr ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - double input[2]; - double output[2]; + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - // With Round, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - // With Round, case 1 - input[0] = 2.1; - input[1] = 1.4; + // Very loose bounds for neurons except inputs + double large = 1000000; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardSigmoid2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = Sigmoid( x3 ) + x8 = Sigmoid( x4 ) + x9 = Sigmoid( x5 ) + x10 = Sigmoid( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = Sigmoid( x11 ) + x15 = Sigmoid( x12 ) + x16 = Sigmoid( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = Sigmoid( x17 ) + x20 = Sigmoid( x18 ) + + x21 = x19 - x20 - 1 + */ - TS_ASSERT( FloatUtils::areEqual( output[0], 2, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::SIGMOID, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::SIGMOID, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - // With Round, case 2 - input[0] = 2.1; - input[1] = 1.6; + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + 
nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -12, 0.0001 ) ); - } - - void test_evaluate_leaky_relu() - { - NLR::NetworkLevelReasoner nlr; + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); - populateNetworkWithLeakyRelu( nlr ); + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); - double input[2]; - double output[2]; + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); - // With Leaky ReLU, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + // Mark the Sigmoid sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - // With Leaky ReLU, case 1 (alpha=0.1) - input[0] = 1; - input[1] = 1; + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.9, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 0.57, 0.0001 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - // With Leaky ReLU, case 2 - input[0] = 1; - input[1] = 2; + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - TS_ASSERT( FloatUtils::areEqual( output[0], -0.04, 0.0001 ) ); - TS_ASSERT( 
FloatUtils::areEqual( output[1], -0.76, 0.0001 ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); } - void test_evaluate_max() + void populateNetworkBackwardSign( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithMax( nlr ); + /* - double input[2]; - double output[1]; + 1 Sign -1 Sign -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ Sign / \ Sign / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 - // With Max, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + The example described in Fig. 
2 of + https://dl.acm.org/doi/10.1145/3563325 + using Sign activation instead of LeakyReLU + */ - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - TS_ASSERT( FloatUtils::areEqual( output[0], -3 ) ); + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); - // With Max, case 1 - input[0] = 1; - input[1] = -3; + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], -18 ) ); + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); - // With Max, case 2 - input[0] = -3; - input[1] = 3; + nlr.setBias( 5, 1, 2 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], -5 ) ); - } - - - void test_evaluate_softmax() - { - NLR::NetworkLevelReasoner nlr; + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); - populateNetworkWithSoftmax( nlr ); + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - double input[2]; - double output[2]; + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - // With Softmax, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.2999, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.4001, 0.0001 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - // With Softmax, case 1 - input[0] = 1; - input[1] = -3; + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.1192, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.7615, 0.0001 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - // With Softmax, case 2 - input[0] = -3; - input[1] = 3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.1206, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.7588, 0.0001 ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); 
+ tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); } - void test_evaluate_bilinear() + void populateNetworkBackwardSign2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = Sign( x3 ) + x8 = Sign( x4 ) + x9 = SIgn( x5 ) + x10 = Sign( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = Sign( x11 ) + x15 = Sign( x12 ) + x16 = Sign( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = Sign( x17 ) + x20 = Sign( x18 ) + + x21 = x19 - x20 - 1 + */ - populateNetworkWithBilinear( nlr ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::SIGN, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - double input[2]; - double output[1]; + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); - // With Bilinear, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); - // With Bilinear, case 1 - input[0] = 0.1; - input[1] = -0.3; + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 2.8304, 0.0001 ) ); + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); - // With Bilinear, case 2 - input[0] = -0.3; - 
input[1] = 0.3; + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.0912, 0.0001 ) ); + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); } - void test_evaluate_non_consecutive_layers() + void populateNetworkBackwardRound( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; + /* + + 1 Rnd -1 Rnd -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ Rnd / \ Rnd / \ + x1 
--- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 2 of + https://dl.acm.org/doi/10.1145/3563325 + using Round activation instead of LeakyReLU + */ // Create the layers nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ROUND, 2 ); nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); // Mark layer dependencies - nlr.addLayerDependency( 0, 1 ); - nlr.addLayerDependency( 1, 2 ); - nlr.addLayerDependency( 2, 3 ); - nlr.addLayerDependency( 0, 3 ); - nlr.addLayerDependency( 3, 4 ); - nlr.addLayerDependency( 0, 4 ); - nlr.addLayerDependency( 4, 5 ); + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); - // Set the weights and relus + // Set the weights and biases for the weighted sum layers nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + nlr.setBias( 5, 1, 2 ); + + // Mark the Round sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, 2 ); - nlr.setWeight( 2, 2, 3, 1, -2 ); - nlr.setWeight( 0, 1, 3, 1, 1 ); nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 0, 0, 4, 2 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 2, 5, 0, 1 ); - - // Evaluate - double input[2]; - double output; - input[0] = 1; - input[1] = 1; + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); - TS_ASSERT( FloatUtils::areEqual( output, 2 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - input[0] = -1; - input[1] = 2; + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); - TS_ASSERT( FloatUtils::areEqual( output, 0 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - TS_ASSERT_THROWS_NOTHING( nlr.computeSuccessorLayers() ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - TS_ASSERT_EQUALS( nlr.getLayer( 0 )->getSuccessorLayers(), Set( { 1, 3, 4 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 1 )->getSuccessorLayers(), Set( { 2 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 2 )->getSuccessorLayers(), Set( { 3 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 3 )->getSuccessorLayers(), Set( { 4 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getSuccessorLayers(), Set( { 5 } ) ); - } + nlr.setNeuronVariable( 
NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - void test_evaluate_abs_and_relu() - { - NLR::NetworkLevelReasoner nlr; + // Very loose bounds for neurons except inputs + double large = 1000000; - populateNetworkWithAbsAndRelu( nlr ); - - double input[2]; - double output[2]; - - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); - - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); } - void test_evaluate_round_and_sign() + void populateNetworkBackwardRound2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithRoundAndSign( nlr ); - - double input[2]; - double output[2]; - - input[0] = 1.6; - input[1] = 1.4; + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = Round( x3 ) + x8 = Round( x4 ) + x9 = Round( x5 ) + x10 = Round( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = Round( x11 ) + x15 = Round( x12 ) + x16 = Round( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = Round( x17 ) + x20 = Round( x18 ) + + x21 = x19 - x20 - 1 + */ - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::ROUND, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::ROUND, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::ROUND, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -2, 0.0001 ) ); + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); - input[0] = 1.6; - input[1] = 1.6; + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setWeight( 2, 0, 3, 0, 
2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], -1, 0.0001) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); - } - - void test_evaluate_leaky_relu_and_sigmoid() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithLeakyReluAndSigmoid( nlr ); + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); - double input[2]; - double output[2]; - - input[0] = 1; - input[1] = 1; + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.7109, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 1.4602, 0.0001 ) ); + // Mark the Round sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); - input[0] = 1; - input[1] = 2; + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.4013, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 0.6508, 0.0001 ) ); - } - - void test_evaluate_softmax_and_max() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSoftmaxAndMax( nlr ); - - double input[2]; - double output[1]; + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - input[0] = 1; - input[1] = -3; + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - TS_ASSERT( FloatUtils::areEqual( output[0], -2.9998, 0.0001 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - input[0] = -3; - input[1] = 3; + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Very loose bounds for neurons except inputs + double large = 
1000000;
-        TS_ASSERT( FloatUtils::areEqual( output[0], -1.0000, 0.0001 ) );
+        tableau.getBoundManager().initialize( 22 );
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+        tableau.setLowerBound( 7, -large );
+        tableau.setUpperBound( 7, large );
+        tableau.setLowerBound( 8, -large );
+        tableau.setUpperBound( 8, large );
+        tableau.setLowerBound( 9, -large );
+        tableau.setUpperBound( 9, large );
+        tableau.setLowerBound( 10, -large );
+        tableau.setUpperBound( 10, large );
+        tableau.setLowerBound( 11, -large );
+        tableau.setUpperBound( 11, large );
+        tableau.setLowerBound( 12, -large );
+        tableau.setUpperBound( 12, large );
+        tableau.setLowerBound( 13, -large );
+        tableau.setUpperBound( 13, large );
+        tableau.setLowerBound( 14, -large );
+        tableau.setUpperBound( 14, large );
+        tableau.setLowerBound( 15, -large );
+        tableau.setUpperBound( 15, large );
+        tableau.setLowerBound( 16, -large );
+        tableau.setUpperBound( 16, large );
+        tableau.setLowerBound( 17, -large );
+        tableau.setUpperBound( 17, large );
+        tableau.setLowerBound( 18, -large );
+        tableau.setUpperBound( 18, large );
+        tableau.setLowerBound( 19, -large );
+        tableau.setUpperBound( 19, large );
+        tableau.setLowerBound( 20, -large );
+        tableau.setUpperBound( 20, large );
+        tableau.setLowerBound( 21, -large );
+        tableau.setUpperBound( 21, large );
    }

-    void test_evaluate_relu_and_bilinear()
+    void populateNetworkBackwardAbs( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
    {
-        NLR::NetworkLevelReasoner nlr;
-
-        populateNetworkWithReluAndBilinear( nlr );
-
-        double input[2];
-        double output[1];
+        /*
-        input[0] = 1;
-        input[1] = 1;
+                 1      A    -1      A    -1      2
+          x0 --- x2 ---> x4 --- x6 ---> x8 --- x10
+            \   /          \   /          \   /
+           1 \ /          2 \ /          1 \ /
+              \/             \/             \/
+              /\             /\             /\
+          -1 /  \          1 /  \        -1 /  \
+            /    \   A      /    \   A     /    \
+          x1 --- x3 ---> x5 --- x7 ---> x9 --- x11
+                 1             -1             2
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
+        The example described in Fig.
2 of + https://dl.acm.org/doi/10.1145/3563325 + using Absolute value activation instead of LeakyReLU + */ - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - input[0] = 1; - input[1] = 2; + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); - } + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); - void test_store_into_other() - { - NLR::NetworkLevelReasoner nlr; + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); - populateNetwork( nlr ); + nlr.setBias( 5, 1, 2 ); - NLR::NetworkLevelReasoner nlr2; + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); - double input[2]; - double output1[2]; - double output2[2]; + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - // Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - // With ReLUs, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // With ReLUs, case 1 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large 
); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); } - - void test_store_into_other_with_sigmoids() + + void populateNetworkBackwardAbs2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSigmoids( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = Abs( x3 ) + x8 = Abs( x4 ) + x9 = Abs( x5 ) + x10 = Abs( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = Abs( x11 ) + x15 = Abs( x12 ) + x16 = Abs( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = Abs( x17 ) + x20 = Abs( x18 ) + + x21 = x19 - x20 - 1 + */ - // case 1 - input[0] = 0; - input[1] = 0; + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); - // case 2 - input[0] = 1; - input[1] = 1; + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + 
nlr.setWeight( 6, 1, 7, 0, -1 ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_round() - { - NLR::NetworkLevelReasoner nlr; + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); - populateNetworkWithRound( nlr ); + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); - NLR::NetworkLevelReasoner nlr2; + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - double input[2]; - double output1[2]; - double output2[2]; + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - // case 1 - input[0] = 0; - input[1] = 0; + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - // case 2 - input[0] = 1; - input[1] = 1; + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + 
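// A quick standalone check (editorial sketch, independent of the NLR API) of
// the commented x3..x21 equations of populateNetworkBackwardAbs2 for one
// input; the chosen point exercises both signs before the Abs layers.

#include <cassert>
#include <cmath>

int main()
{
    double x0 = 0, x1 = 0, x2 = 1;
    double x3 = -x0 + x1, x4 = x0 + 2 * x1;                         // 0, 0
    double x5 = x0 + x1 + x2, x6 = 3 * x0 - 2 * x1 - x2;            // 1, -1
    double x7 = std::fabs( x3 ), x8 = std::fabs( x4 );
    double x9 = std::fabs( x5 ), x10 = std::fabs( x6 );             // 0, 0, 1, 1
    double x11 = 2 * x7 + x9 - x10 + 2;                             // 2
    double x12 = -x7 + 2 * x8 + x10;                                // 1
    double x13 = x7 - x8 + 2 * x9 - 2;                              // 0
    double x14 = std::fabs( x11 ), x15 = std::fabs( x12 ), x16 = std::fabs( x13 );
    double x17 = -x14 + x15 - x16, x18 = x14 + x15 + x16;           // -1, 3
    double x19 = std::fabs( x17 ), x20 = std::fabs( x18 );          // 1, 3
    double x21 = x19 - x20 - 1;
    assert( x21 == -3 );                                            // 1 - 3 - 1
    return 0;
}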
tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); } - void test_store_into_other_with_sign() + void populateNetworkBackwardLeakyReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; + /* - populateNetworkWithSign( nlr ); + 1 LR -1 LR -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ LR / \ LR / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 - NLR::NetworkLevelReasoner nlr2; + The example described in Fig. 2 of + https://dl.acm.org/doi/10.1145/3563325 + */ - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + nlr.getLayer( 4 )->setAlpha( 0.1 ); - double input[2]; - double output1[2]; - double output2[2]; + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); - // case 1 - input[0] = 0; - input[1] = 0; + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); - // case 2 - input[0] = 1; - input[1] = 1; + nlr.setBias( 5, 1, 2 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + // Mark the LeakyReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_abs() - { - NLR::NetworkLevelReasoner nlr; + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); - populateNetworkWithAbs( nlr ); + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - NLR::NetworkLevelReasoner nlr2; + 
nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - double input[2]; - double output1[2]; - double output2[2]; + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - // case 1 - input[0] = 0; - input[1] = 0; + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); } - void test_store_into_other_with_leaky_relu() + void populateNetworkBackwardLeakyRelu2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithLeakyRelu( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = LeakyReLU( x3 ) + x8 = LeakyReLU( x4 ) + x9 = LeakyReLU( x5 ) + x10 = LeakyReLU( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = LeakyReLU( x11 ) + x15 = LeakyReLU( x12 ) + x16 = LeakyReLU( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = LeakyReLU( x17 ) + x20 = LeakyReLU( x18 ) + + x21 = x19 - x20 - 1 + */ - // case 1 - input[0] = 0; - input[1] = 0; + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + 
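// Both helpers use slope alpha = 0.1, set by the setAlpha calls just below. As
// a reference, a standalone sketch (editorial, outside the NLR API) of
// LeakyReLU and of the forward pass of the two-hidden-layer Fig. 2 variant
// built above:

#include <cassert>
#include <cmath>

static double leakyRelu( double x, double alpha )
{
    return x > 0 ? x : alpha * x; // identity on positives, damped negatives
}

int main()
{
    const double alpha = 0.1;
    double x0 = 1, x1 = 2;
    double x2 = x0 - x1, x3 = x0 + x1;                               // -1, 3
    double x4 = leakyRelu( x2, alpha ), x5 = leakyRelu( x3, alpha ); // -0.1, 3
    double x6 = -x4 + x5, x7 = 2 * x4 - x5;                          // 3.1, -3.2
    double x8 = leakyRelu( x6, alpha ), x9 = leakyRelu( x7, alpha ); // 3.1, -0.32
    double x10 = -x8 - x9, x11 = x8 + 2 * x9 + 2;                    // -2.78, 4.46
    assert( std::fabs( x10 + 2.78 ) < 1e-9 && std::fabs( x11 - 4.46 ) < 1e-9 );
    return 0;
}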
nlr.getLayer( 2 )->setAlpha( 0.1 ); + nlr.getLayer( 4 )->setAlpha( 0.1 ); + nlr.getLayer( 6 )->setAlpha( 0.1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); - // case 2 - input[0] = 1; - input[1] = 1; + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_max() - { - NLR::NetworkLevelReasoner nlr; + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); - populateNetworkWithMax( nlr ); + // Mark the LeakyReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); - NLR::NetworkLevelReasoner nlr2; + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - double input[2]; - double output1[2]; - double output2[2]; + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - // case 1 - input[0] = 0; - input[1] = 0; + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - 
TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - // case 2 - input[0] = 1; - input[1] = 1; + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); } - void test_store_into_other_with_softmax() + void populateNetworkBackwardSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSoftmax( nlr ); - - NLR::NetworkLevelReasoner nlr2; + /* + a a' + x e + b b' f h + y d + c c' + */ - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::MAX, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - double input[2]; - double output1[2]; - double output2[2]; + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); - // case 1 - input[0] = 0; - input[1] = 0; + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( 
nlr2.evaluate( input, output2 ) ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + nlr.setWeight( 4, 0, 5, 0, -1 ); - // case 2 - input[0] = 1; - input[1] = 1; + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + // Mark the Softmax/Max sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_bilinear() - { - NLR::NetworkLevelReasoner nlr; + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); - populateNetworkWithBilinear( nlr ); + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - NLR::NetworkLevelReasoner nlr2; + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - double input[2]; - double output1[2]; - double output2[2]; + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - // case 1 - input[0] = 0; - input[1] = 0; + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, 
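// The helper above wires every layer-1 neuron into every Softmax output (the
// all-to-all addActivationSource calls), since each softmax entry depends on
// all of its inputs. For reference, a generic standalone sketch of the two
// activations being combined (editorial; not Marabou's implementation):

#include <algorithm>
#include <cassert>
#include <cmath>
#include <vector>

// softmax_i( z ) = exp( z_i ) / sum_j exp( z_j )
static std::vector<double> softmax( const std::vector<double> &z )
{
    double sum = 0;
    for ( double zi : z )
        sum += std::exp( zi );
    std::vector<double> out;
    for ( double zi : z )
        out.push_back( std::exp( zi ) / sum );
    return out;
}

int main()
{
    std::vector<double> s = softmax( { 1, 2, 3 } );
    assert( std::fabs( s[0] + s[1] + s[2] - 1 ) < 1e-9 ); // outputs form a distribution
    double m = *std::max_element( s.begin(), s.end() );   // what a Max layer would pick
    assert( m == s[2] );                                  // largest logit wins
    return 0;
}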
-large );
+        tableau.setUpperBound( 10, large );
+        tableau.setLowerBound( 11, -large );
+        tableau.setUpperBound( 11, large );
    }

-    void test_generate_input_query()
+
+    void populateNetworkBackwardSoftmaxAndMax2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
    {
-        NLR::NetworkLevelReasoner nlr;
+        /*
+                x3    x7
+          x0                x11    x14
+                x4    x8
+          x1                x12    x15    x17
+                x5    x9
+          x2                x13    x16
+                x6    x10
+
+          x3 = -x0 + x1
+          x4 = x0 + 2*x1
+          x5 = x0 + x1 + x2
+          x6 = 3*x0 - 2*x1 - x2
+
+          x7, x8, x9, x10 = Softmax( x3, x4, x5, x6 )
+
+          x11 = 2x7 + x9 - x10 + 2
+          x12 = -x7 + 2x8 + x10
+          x13 = x7 - x8 + 2x9 - 2
+
+          x14, x15, x16 = Softmax( x11, x12, x13 )
+
+          x17 = Max( x14, x15, x16 )
+        */

        // Create the layers
-        nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
-        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 );
-        nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 );
-        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 );
-        nlr.addLayer( 4, NLR::Layer::RELU, 2 );
-        nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 );
+        nlr.addLayer( 0, NLR::Layer::INPUT, 3 );
+        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 );
+        nlr.addLayer( 2, NLR::Layer::SOFTMAX, 4 );
+        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 );
+        nlr.addLayer( 4, NLR::Layer::SOFTMAX, 3 );
+        nlr.addLayer( 5, NLR::Layer::MAX, 1 );

        // Mark layer dependencies
        for ( unsigned i = 1; i <= 5; ++i )
            nlr.addLayerDependency( i - 1, i );

-        // Variable indexing
+        // Set the weights and biases for the weighted sum layers
+        nlr.setWeight( 0, 0, 1, 0, -1 );
+        nlr.setWeight( 0, 0, 1, 1, 1 );
+        nlr.setWeight( 0, 0, 1, 2, 1 );
+        nlr.setWeight( 0, 0, 1, 3, 3 );
+        nlr.setWeight( 0, 1, 1, 0, 1 );
+        nlr.setWeight( 0, 1, 1, 1, 2 );
+        nlr.setWeight( 0, 1, 1, 2, 1 );
+        nlr.setWeight( 0, 1, 1, 3, -2 );
+        nlr.setWeight( 0, 2, 1, 2, 1 );
+        nlr.setWeight( 0, 2, 1, 3, -1 );
+
+        nlr.setWeight( 2, 0, 3, 0, 2 );
+        nlr.setWeight( 2, 0, 3, 1, -1 );
+        nlr.setWeight( 2, 0, 3, 2, 1 );
+        nlr.setWeight( 2, 1, 3, 1, 2 );
+        nlr.setWeight( 2, 1, 3, 2, -1 );
+        nlr.setWeight( 2, 2, 3, 0, 1 );
+        nlr.setWeight( 2, 2, 3, 2, 2 );
+        nlr.setWeight( 2, 3, 3, 0, -1 );
+        nlr.setWeight( 2, 3, 3, 1, 1 );
+
+        nlr.setBias( 3, 0, 2 );
+        nlr.setBias( 3, 2, -2 );
+
+        // Mark the Softmax/Max sources
+        nlr.addActivationSource( 1, 0, 2, 0 );
+        nlr.addActivationSource( 1, 1, 2, 0 );
+        nlr.addActivationSource( 1, 2, 2, 0 );
+        nlr.addActivationSource( 1, 3, 2, 0 );
+        nlr.addActivationSource( 1, 0, 2, 1 );
+        nlr.addActivationSource( 1, 1, 2, 1 );
+        nlr.addActivationSource( 1, 2, 2, 1 );
+        nlr.addActivationSource( 1, 3, 2, 1 );
+        nlr.addActivationSource( 1, 0, 2, 2 );
+        nlr.addActivationSource( 1, 1, 2, 2 );
+        nlr.addActivationSource( 1, 2, 2, 2 );
+        nlr.addActivationSource( 1, 3, 2, 2 );
+        nlr.addActivationSource( 1, 0, 2, 3 );
+        nlr.addActivationSource( 1, 1, 2, 3 );
+        nlr.addActivationSource( 1, 2, 2, 3 );
+        nlr.addActivationSource( 1, 3, 2, 3 );
+
+        nlr.addActivationSource( 3, 0, 4, 0 );
+        nlr.addActivationSource( 3, 1, 4, 0 );
+        nlr.addActivationSource( 3, 2, 4, 0 );
+        nlr.addActivationSource( 3, 0, 4, 1 );
+        nlr.addActivationSource( 3, 1, 4, 1 );
+        nlr.addActivationSource( 3, 2, 4, 1 );
+        nlr.addActivationSource( 3, 0, 4, 2 );
+        nlr.addActivationSource( 3, 1, 4, 2 );
+        nlr.addActivationSource( 3, 2, 4, 2 );
+
+        nlr.addActivationSource( 4, 0, 5, 0 );
+        nlr.addActivationSource( 4, 1, 5, 0 );
+        nlr.addActivationSource( 4, 2, 5, 0 );
+        // Variable indexing
        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 );
-        nlr.setNeuronVariable(
NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - // Set the weights and biases for the weighted sum layers + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 18 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + } + + void populateNetworkBackwardReluAndBilinear( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + a a' + x e + b b' f h + y d + c c' + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + // Set the weights and biases for the weighted sum layers nlr.setWeight( 0, 0, 1, 0, 1 ); nlr.setWeight( 0, 0, 1, 1, 2 ); nlr.setWeight( 0, 1, 1, 1, -3 ); @@ 
-2828,700 +3900,520 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setWeight( 2, 1, 3, 0, 1 ); nlr.setWeight( 2, 1, 3, 1, 1 ); nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -5 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); + nlr.setWeight( 4, 0, 5, 0, -1 ); nlr.setBias( 1, 0, 1 ); - nlr.setBias( 1, 1, 0 ); - nlr.setBias( 1, 2, 0 ); - - nlr.setBias( 3, 0, 0 ); nlr.setBias( 3, 1, 2 ); - nlr.setBias( 5, 0, 0 ); - nlr.setBias( 5, 1, 0 ); - - // Mark the ReLU/Abs sources + // Mark the ReLU/Bilinear sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 1 ); nlr.addActivationSource( 1, 2, 2, 2 ); nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 1, 4, 0 ); - // Start the testing - Query ipq; - nlr.generateQuery( ipq ); - List unhandledEquations; - Set varsInUnhandledConstraints; - TS_ASSERT( - ipq.constructNetworkLevelReasoner( unhandledEquations, varsInUnhandledConstraints ) ); - NLR::NetworkLevelReasoner *reconstructedNlr = ipq.getNetworkLevelReasoner(); + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - double input[2]; - double output[2]; + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - input[0] = 1; - input[1] = 1; + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - input[0] = 1; - input[1] = 2; + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; - TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); } - - void test_simulate_relu() + + void populateNetworkBackwardReluAndBilinear2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; + /* + x3 x7 + x0 + x4 x8 x11 x13 + x1 x15 + x5 x9 x12 x14 + x2 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = 
ReLU( x3 ) + x8 = ReLU( x4 ) + x9 = ReLU( x5 ) + x10 = ReLU( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + + x13 = ReLU( x11 ) + x14 = ReLU( x12 ) + + x15 = x13 * x14 + */ - populateNetwork( nlr ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::RELU, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::BILINEAR, 1 ); - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); - // With ReLUs, Inputs are zeros, only biases count - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } + nlr.setBias( 3, 0, 2 ); + + // Mark the ReLU/Bilinear sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + nlr.addActivationSource( 4, 0, 5, 0 ); + nlr.addActivationSource( 4, 1, 5, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 13 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 14 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 15 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 16 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + 
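// A standalone check (editorial sketch, independent of the NLR API) of the
// commented x3..x15 equations of populateNetworkBackwardReluAndBilinear2; the
// BILINEAR layer is the product of its two sources, per the x15 equation.

#include <algorithm>
#include <cassert>

int main()
{
    double x0 = 1, x1 = 1, x2 = 1;
    double x3 = -x0 + x1, x4 = x0 + 2 * x1;                       // 0, 3
    double x5 = x0 + x1 + x2, x6 = 3 * x0 - 2 * x1 - x2;          // 3, 0
    double x7 = std::max( x3, 0.0 ), x8 = std::max( x4, 0.0 );    // ReLU
    double x9 = std::max( x5, 0.0 ), x10 = std::max( x6, 0.0 );
    double x11 = 2 * x7 + x9 - x10 + 2, x12 = -x7 + 2 * x8 + x10; // 5, 6
    double x13 = std::max( x11, 0.0 ), x14 = std::max( x12, 0.0 );
    double x15 = x13 * x14;
    assert( x15 == 30 );                                          // 5 * 6
    return 0;
}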
tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + } + + void test_evaluate_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetwork( nlr ); + + double input[2]; + double output[2]; + + // With ReLUs, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); // With ReLUs, case 1 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 1 ) ); + input[0] = 1; + input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 1 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 1 ) ); - // With ReLUs, case 1 and 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, 1 ) ); - simulations3.append( Vector( simulationSize, 2 ) ); + // With ReLUs, case 2 + input[0] = 1; + input[1] = 2; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 0 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 0 ) ); } - void test_simulate_sigmoids() + void test_evaluate_sigmoids() { NLR::NetworkLevelReasoner nlr; populateNetworkWithSigmoids( nlr ); - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + double input[2]; + double output[2]; // case 1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); + input[0] = 0; + input[1] = 0; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.6750, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( 
nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 3.0167, - 0.0001 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 0.6750, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 3.0167, 0.0001 ) ); // case 2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 1 ) ); + input[0] = 1; + input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.6032, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2.5790, - 0.0001 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 0.6032, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.5790, 0.0001 ) ); // case 3 - Vector> simulations3; - simulations3.append( Vector( simulationSize, 1 ) ); - simulations3.append( Vector( simulationSize, 2 ) ); + input[0] = 1; + input[1] = 2; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.5045, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2.1957, - 0.0001 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 0.5045, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.1957, 0.0001 ) ); } - void test_simulate_round() + void test_evaluate_abs() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithRound( nlr ); + populateNetworkWithAbs( nlr ); - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + double input[2]; + double output[2]; - // With Round, Inputs are zeros, only biases count - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); + // With Abs, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - // With Round, case 1 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 2.1 ) ); - simulations2.append( Vector( simulationSize, 1.4 ) ); + // With Abs, case 1 + input[0] = -2; + input[1] = -2; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 2, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - 
-                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
-                    .get( 1 )
-                    .get( i ),
-                -4,
-                0.0001 ) );
-        }
+        TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) );
+        TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) );

-        // With Round, case 2
-        Vector<Vector<double>> simulations3;
-        simulations3.append( Vector<double>( simulationSize, 2.1 ) );
-        simulations3.append( Vector<double>( simulationSize, 1.6 ) );
+        // With Abs, case 2
+        input[0] = 1;
+        input[1] = 2;

-        TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );

-        for ( unsigned i = 0; i < simulationSize; ++i )
-        {
-            TS_ASSERT( FloatUtils::areEqual(
-                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
-                    .get( 0 )
-                    .get( i ),
-                0,
-                0.0001 ) );
-            TS_ASSERT( FloatUtils::areEqual(
-                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
-                    .get( 1 )
-                    .get( i ),
-                -12,
-                0.0001 ) );
-        }
+        TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) );
+        TS_ASSERT( FloatUtils::areEqual( output[1], 10 ) );
     }

-    void test_simulate_sign()
+    void test_evaluate_sign()
     {
         NLR::NetworkLevelReasoner nlr;

         populateNetworkWithSign( nlr );

-        unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS );
+        double input[2];
+        double output[2];

         // With Sign, Inputs are zeros, only biases count
-        Vector<Vector<double>> simulations1;
-        simulations1.append( Vector<double>( simulationSize, 0 ) );
-        simulations1.append( Vector<double>( simulationSize, 0 ) );
+        input[0] = 0;
+        input[1] = 0;

-        TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );

-        for ( unsigned i = 0; i < simulationSize; ++i )
-        {
-            TS_ASSERT( FloatUtils::areEqual(
-                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
-                    .get( 0 )
-                    .get( i ),
-                1 ) );
-            TS_ASSERT( FloatUtils::areEqual(
-                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
-                    .get( 1 )
-                    .get( i ),
-                4 ) );
-        }
+        TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) );
+        TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) );

         // With Sign, case 1
-        Vector<Vector<double>> simulations2;
-        simulations2.append( Vector<double>( simulationSize, -2 ) );
-        simulations2.append( Vector<double>( simulationSize, -2 ) );
+        input[0] = -2;
+        input[1] = -2;

-        TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );

-        for ( unsigned i = 0; i < simulationSize; ++i )
-        {
-            TS_ASSERT( FloatUtils::areEqual(
-                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
-                    .get( 0 )
-                    .get( i ),
-                1 ) );
-            TS_ASSERT( FloatUtils::areEqual(
-                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
-                    .get( 1 )
-                    .get( i ),
-                4 ) );
-        }
+        TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) );
+        TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) );

-        // With Sign, case 2
-        Vector<Vector<double>> simulations3;
-        simulations3.append( Vector<double>( simulationSize, -1 ) );
-        simulations3.append( Vector<double>( simulationSize, 1 ) );
+        // With Sign, case 2 (0 considered "non-negative", sign(0)=1)
+        input[0] = -1;
+        input[1] = 1;

-        TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );

-        for ( unsigned i = 0; i < simulationSize; ++i )
-        {
-            TS_ASSERT( FloatUtils::areEqual(
-                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
-                    .get( 0 )
-                    .get( i ),
-                -1 ) );
-            TS_ASSERT( FloatUtils::areEqual(
-                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
-                    .get( 1 )
-                    .get( i ),
-                -4 ) );
-        }
+
TS_ASSERT( FloatUtils::areEqual( output[0], -1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -4 ) ); } - void test_simulate_abs() + void test_evaluate_round() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithAbs( nlr ); + populateNetworkWithRound( nlr ); - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + double input[2]; + double output[2]; - // With Abs, Inputs are zeros, only biases count - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); + // With Round, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - // With Abs, case 1 - Vector> simulations2; - simulations2.append( Vector( simulationSize, -2 ) ); - simulations2.append( Vector( simulationSize, -2 ) ); + // With Round, case 1 + input[0] = 2.1; + input[1] = 1.4; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 2, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); - // With Abs, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, 1 ) ); - simulations3.append( Vector( simulationSize, 2 ) ); + // With Round, case 2 + input[0] = 2.1; + input[1] = 1.6; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 4 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 10 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 0, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -12, 0.0001 ) ); } - void test_simulate_leaky_relu() + void test_evaluate_leaky_relu() { NLR::NetworkLevelReasoner nlr; populateNetworkWithLeakyRelu( nlr ); - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + double input[2]; + double output[2]; // With Leaky ReLU, Inputs are zeros, only biases count - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); + input[0] = 0; + input[1] = 0; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( 
nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); // With Leaky ReLU, case 1 (alpha=0.1) - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 1 ) ); + input[0] = 1; + input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.9, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 0.57, - 0.0001 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 0.9, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 0.57, 0.0001 ) ); // With Leaky ReLU, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, 1 ) ); - simulations3.append( Vector( simulationSize, 2 ) ); + input[0] = 1; + input[1] = 2; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -0.04, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -0.76, - 0.0001 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], -0.04, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -0.76, 0.0001 ) ); } - void test_simulate_max() + void test_evaluate_max() { NLR::NetworkLevelReasoner nlr; populateNetworkWithMax( nlr ); - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + double input[2]; + double output[1]; // With Max, Inputs are zeros, only biases count - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); + input[0] = 0; + input[1] = 0; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -3 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], -3 ) ); // With Max, case 1 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, -3 ) ); + input[0] = 1; + input[1] = -3; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -18 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], -18 ) ); // With Max, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, -3 ) ); - simulations3.append( Vector( simulationSize, 3 ) ); + input[0] = -3; + input[1] = 
3; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -5 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], -5 ) ); } - void test_simulate_softmax() + + void test_evaluate_softmax() { NLR::NetworkLevelReasoner nlr; populateNetworkWithSoftmax( nlr ); - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + double input[2]; + double output[2]; // With Softmax, Inputs are zeros, only biases count - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + input[0] = 0; + input[1] = 0; - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.2999, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2.4001, - 0.0001 ) ); - } + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.2999, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.4001, 0.0001 ) ); // With Softmax, case 1 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, -3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + input[0] = 1; + input[1] = -3; - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.1192, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2.7615, - 0.0001 ) ); - } + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.7615, 0.0001 ) ); // With Softmax, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, -3 ) ); - simulations3.append( Vector( simulationSize, 3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.1206, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2.7588, - 0.0001 ) ); - } + input[0] = -3; + input[1] = 3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.1206, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.7588, 0.0001 ) ); } - void test_simulate_bilinear() + void test_evaluate_bilinear() { NLR::NetworkLevelReasoner nlr; populateNetworkWithBilinear( nlr ); - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + double input[2]; + double output[1]; // With Bilinear, Inputs are zeros, only biases count - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( 
simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + input[0] = 0; + input[1] = 0; - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0 ) ); - } + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); // With Bilinear, case 1 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 0.1 ) ); - simulations2.append( Vector( simulationSize, -0.3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + input[0] = 0.1; + input[1] = -0.3; - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 2.8304, - 0.0001 ) ); - } + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 2.8304, 0.0001 ) ); // With Bilinear, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, -0.3 ) ); - simulations3.append( Vector( simulationSize, 0.3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + input[0] = -0.3; + input[1] = 0.3; - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.0912, - 0.0001 ) ); - } + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.0912, 0.0001 ) ); } - - void test_simulate_non_consecutive_layers() + + void test_evaluate_non_consecutive_layers() { NLR::NetworkLevelReasoner nlr; @@ -3565,889 +4457,3056 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setWeight( 4, 1, 5, 0, 1 ); nlr.setWeight( 4, 2, 5, 0, 1 ); - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + // Evaluate + double input[2]; + double output; - // Simulate1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 1 ) ); - simulations1.append( Vector( simulationSize, 1 ) ); + input[0] = 1; + input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); + TS_ASSERT( FloatUtils::areEqual( output, 2 ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 2 ) ); + input[0] = -1; + input[1] = 2; - // Simulate2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, -1 ) ); - simulations2.append( Vector( simulationSize, 2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); + TS_ASSERT( FloatUtils::areEqual( output, 0 ) ); - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.computeSuccessorLayers() ); - for ( unsigned i = 0; i < simulationSize; ++i ) - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0 ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 0 )->getSuccessorLayers(), Set( { 1, 3, 4 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 1 )->getSuccessorLayers(), Set( { 2 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 2 )->getSuccessorLayers(), Set( { 3 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 3 )->getSuccessorLayers(), Set( { 4 } ) ); + TS_ASSERT_EQUALS( 
nlr.getLayer( 4 )->getSuccessorLayers(), Set( { 5 } ) ); } - void test_simulate_abs_and_relu() + void test_evaluate_abs_and_relu() { NLR::NetworkLevelReasoner nlr; - + populateNetworkWithAbsAndRelu( nlr ); - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + double input[2]; + double output[2]; - // Simulate1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 1 ) ); - simulations1.append( Vector( simulationSize, 1 ) ); + input[0] = 1; + input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 2 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); - // Simulate2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 2 ) ); + input[0] = 1; + input[1] = 2; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 4 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); } - void test_simulate_round_and_sign() + void test_evaluate_round_and_sign() { NLR::NetworkLevelReasoner nlr; - + populateNetworkWithRoundAndSign( nlr ); + + double input[2]; + double output[2]; - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With Round/Sign, case 1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 1.6 ) ); - simulations1.append( Vector( simulationSize, 1.4 ) ); + input[0] = 1.6; + input[1] = 1.4; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -2 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 1, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -2, 0.0001 ) ); - // With Round/Sign, case 2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1.6 ) ); - simulations2.append( Vector( simulationSize, 1.6 ) ); + input[0] = 1.6; + input[1] = 1.6; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), 
- -4 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], -1, 0.0001) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); } - void test_simulate_leaky_relu_and_sigmoid() + void test_evaluate_leaky_relu_and_sigmoid() { NLR::NetworkLevelReasoner nlr; - + populateNetworkWithLeakyReluAndSigmoid( nlr ); + + double input[2]; + double output[2]; - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With LeakyReLU/Sigmoid, case 1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 1 ) ); - simulations1.append( Vector( simulationSize, 1 ) ); + input[0] = 1; + input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.7109, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 1.4602, - 0.0001 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 0.7109, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 1.4602, 0.0001 ) ); - // With LeakyReLU/Sigmoid, case 2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 2 ) ); + input[0] = 1; + input[1] = 2; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.4013, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 0.6508, - 0.0001 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 0.4013, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 0.6508, 0.0001 ) ); } - void test_simulate_softmax_and_max() + void test_evaluate_softmax_and_max() { NLR::NetworkLevelReasoner nlr; - + populateNetworkWithSoftmaxAndMax( nlr ); + + double input[2]; + double output[1]; - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With LeakyReLU/Sigmoid, case 1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 1 ) ); - simulations1.append( Vector( simulationSize, -3 ) ); + input[0] = 1; + input[1] = -3; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -2.9998, - 0.0001 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], -2.9998, 0.0001 ) ); - // With LeakyReLU/Sigmoid, case 2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, -3 ) ); - simulations2.append( Vector( simulationSize, 3 ) ); + input[0] = -3; + input[1] = 3; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -1, - 0.0001 ) ); - } + TS_ASSERT( 
FloatUtils::areEqual( output[0], -1.0000, 0.0001 ) );
     }

-    void test_simulate_relu_and_bilinear()
+    void test_evaluate_relu_and_bilinear()
     {
         NLR::NetworkLevelReasoner nlr;
-
+        populateNetworkWithReluAndBilinear( nlr );
+
+        double input[2];
+        double output[1];

-        unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS );
-
-        // With Relu/Bilinear, case 1
-        Vector<Vector<double>> simulations1;
-        simulations1.append( Vector<double>( simulationSize, 1 ) );
-        simulations1.append( Vector<double>( simulationSize, 1 ) );
+        input[0] = 1;
+        input[1] = 1;

-        TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );

-        for ( unsigned i = 0; i < simulationSize; ++i )
-        {
-            TS_ASSERT( FloatUtils::areEqual(
-                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
-                    .get( 0 )
-                    .get( i ),
-                1 ) );
-        }
+        TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) );

-        // With ReLU/Bilinear, case 2
-        Vector<Vector<double>> simulations2;
-        simulations2.append( Vector<double>( simulationSize, 1 ) );
-        simulations2.append( Vector<double>( simulationSize, 2 ) );
+        input[0] = 1;
+        input[1] = 2;

-        TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );

-        for ( unsigned i = 0; i < simulationSize; ++i )
-        {
-            TS_ASSERT( FloatUtils::areEqual(
-                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
-                    .get( 0 )
-                    .get( i ),
-                0 ) );
-        }
+        TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) );
     }
-
-    void test_interval_arithmetic_bound_propagation_relu_constraints()
+
+    void test_store_into_other()
     {
         NLR::NetworkLevelReasoner nlr;
+        populateNetwork( nlr );

-        MockTableau tableau;
-        tableau.getBoundManager().initialize( 14 );
+        NLR::NetworkLevelReasoner nlr2;

-        // Initialize the bounds
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
+        TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) );

-        double large = 1000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
+        double input[2];
+        double output1[2];
+        double output2[2];

-        nlr.setTableau( &tableau );
+        // Inputs are zeros, only biases count
+        input[0] = 0;
+        input[1] = 0;

-        // Initialize
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) );
+        TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) );
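+        // storeIntoOther() is expected to produce an independent copy of the
+        // network (layers, weights, biases and activation sources), so the
+        // copy nlr2 must agree with nlr on every input point checked below.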
-        // Perform the tightening pass
-        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );

-        List<Tightening> expectedBounds( {
-            Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+        TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) );
+        TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) );

-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
+        TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) );

-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
+        // With ReLUs, Inputs are zeros, only biases count
+        input[0] = 0;
+        input[1] = 0;

-            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ),
+        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) );
+        TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) );

-            Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-            Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ),
+        TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) );
+        TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) );

-            Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ),
-            Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ),
-        } );
+        // With ReLUs, case 1
+        input[0] = 1;
+        input[1] = 1;

-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) );
+        TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) );

-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
+        TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) );
+        TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) );
+    }

-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
+    void test_store_into_other_with_sigmoids()
+    {
+        NLR::NetworkLevelReasoner nlr;

-        // Initialize
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        populateNetworkWithSigmoids( nlr );

-        // Perform the tightening pass
-        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
+        NLR::NetworkLevelReasoner nlr2;

-        List<Tightening> expectedBounds2( {
-            Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+        TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) );

+        double input[2];
+        double output1[2];
+        double output2[2];

-            Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ),
Tightening( 5, 5, Tightening::UB ), + // case 1 + input[0] = 0; + input[1] = 0; - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + // case 2 + input[0] = 1; + input[1] = 1; - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), - } ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - - void test_interval_arithmetic_bound_propagation_abs_constraints() + + void test_store_into_other_with_round() { NLR::NetworkLevelReasoner nlr; - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + populateNetworkWithRound( nlr ); - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); + NLR::NetworkLevelReasoner nlr2; - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); + double input[2]; + double output1[2]; + double output2[2]; - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); + // case 1 + input[0] = 0; + input[1] = 0; - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); + // case 2 + input[0] = 1; + input[1] = 1; - // Layer dependenices - nlr.addLayerDependency( 0, 1 ); - nlr.addLayerDependency( 1, 2 ); - nlr.addLayerDependency( 2, 3 ); - nlr.addLayerDependency( 3, 4 ); - nlr.addLayerDependency( 4, 5 ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( 
input, output2 ) ); - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_sign() + { + NLR::NetworkLevelReasoner nlr; - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + populateNetworkWithSign( nlr ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + NLR::NetworkLevelReasoner nlr2; - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + double input[2]; + double output1[2]; + double output2[2]; - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + // case 1 + input[0] = 0; + input[1] = 0; - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 2 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + // case 2 + input[0] = 1; + input[1] = 1; - nlr.setTableau( &tableau ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_abs() + { + NLR::NetworkLevelReasoner nlr; - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + populateNetworkWithAbs( nlr ); - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + NLR::NetworkLevelReasoner nlr2; - Tightening( 4, -8, Tightening::LB ), 
Tightening( 4, 7, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 8, Tightening::UB ), + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + double input[2]; + double output1[2]; + double output2[2]; - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), + // case 1 + input[0] = 0; + input[1] = 0; - Tightening( 10, -3, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 41, Tightening::UB ), - } ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + // case 2 + input[0] = 1; + input[1] = 1; - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_leaky_relu() + { + NLR::NetworkLevelReasoner nlr; - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + populateNetworkWithLeakyRelu( nlr ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + NLR::NetworkLevelReasoner nlr2; - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), + double input[2]; + double output1[2]; + double output2[2]; - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), 
Tightening( 7, 2, Tightening::UB ), + // case 1 + input[0] = 0; + input[1] = 0; - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), - } ); + // case 2 + input[0] = 1; + input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - void test_interval_arithmetic_bound_propagation_sign_constraints() + void test_store_into_other_with_max() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithSign( nlr ); - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); + populateNetworkWithMax( nlr ); - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); + NLR::NetworkLevelReasoner nlr2; - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - nlr.setTableau( &tableau ); + double input[2]; + double output1[2]; + double output2[2]; - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + // case 1 + input[0] = 0; + input[1] = 0; - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 1, Tightening::LB ), Tightening( 3, 1, Tightening::UB ), + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, 
Tightening::UB ), + // case 2 + input[0] = 1; + input[1] = 1; - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_softmax() + { + NLR::NetworkLevelReasoner nlr; - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 3, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + populateNetworkWithSoftmax( nlr ); - Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), - } ); + NLR::NetworkLevelReasoner nlr2; - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - // Change the current bounds - tableau.setLowerBound( 0, 3 ); - tableau.setUpperBound( 0, 4 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + double input[2]; + double output1[2]; + double output2[2]; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + // case 1 + input[0] = 0; + input[1] = 0; - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - List expectedBounds2( { - Tightening( 2, 4, Tightening::LB ), Tightening( 2, 5, Tightening::UB ), - Tightening( 3, 1, Tightening::LB ), Tightening( 3, 1, Tightening::UB ), + // case 2 + input[0] = 1; + input[1] = 1; - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 11, Tightening::UB ), - Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + TS_ASSERT( 
FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_bilinear() + { + NLR::NetworkLevelReasoner nlr; - Tightening( 8, 1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + populateNetworkWithBilinear( nlr ); - Tightening( 10, 1, Tightening::LB ), Tightening( 10, 3, Tightening::UB ), - Tightening( 11, 1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + NLR::NetworkLevelReasoner nlr2; - Tightening( 12, 1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, 4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), - } ); + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - void test_interval_arithmetic_bound_propagation_leaky_relu_constraints() + void test_generate_input_query() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithLeakyRelu( nlr ); - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 2 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); - nlr.setTableau( &tableau ); + // Variable indexing - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( 
NLR::NeuronIndex( 0, 1 ), 1 ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), - Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), - Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ), - Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), - - Tightening( 9, -0.28, Tightening::LB ), Tightening( 9, 10.1, Tightening::UB ), - Tightening( 11, -0.38, Tightening::LB ), Tightening( 11, 9.1, Tightening::UB ), + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - Tightening( 12, -0.28, Tightening::LB ), Tightening( 12, 10.1, Tightening::UB ), - Tightening( 13, -1.42, Tightening::LB ), Tightening( 13, 37.4, Tightening::UB ), - } ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + // Set the weights and biases for the weighted sum layers - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -5 ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( 
nlr.intervalArithmeticBoundPropagation() ); + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 1, 1, 0 ); + nlr.setBias( 1, 2, 0 ); - Tightening( 3, -0.2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + nlr.setBias( 3, 0, 0 ); + nlr.setBias( 3, 1, 2 ); - Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 7.1, Tightening::UB ), - Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), - - Tightening( 9, -0.34, Tightening::LB ), Tightening( 9, 7.1, Tightening::UB ), - Tightening( 11, -0.32, Tightening::LB ), Tightening( 11, 7.3, Tightening::UB ), + nlr.setBias( 5, 0, 0 ); + nlr.setBias( 5, 1, 0 ); - Tightening( 12, -0.34, Tightening::LB ), Tightening( 12, 7.1, Tightening::UB ), - Tightening( 13, -1.3, Tightening::LB ), Tightening( 13, 29, Tightening::UB ), - } ); + // Mark the ReLU/Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_round_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithRound( nlr ); - - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); - // Initialize the bounds - tableau.setLowerBound( 0, 1.4 ); - tableau.setUpperBound( 0, 1.6 ); - tableau.setLowerBound( 1, -1.4 ); - tableau.setUpperBound( 1, 2.1 ); + // Start the testing + Query ipq; + nlr.generateQuery( ipq ); + List unhandledEquations; + Set varsInUnhandledConstraints; + TS_ASSERT( + ipq.constructNetworkLevelReasoner( unhandledEquations, varsInUnhandledConstraints ) ); + NLR::NetworkLevelReasoner *reconstructedNlr = ipq.getNetworkLevelReasoner(); + + double input[2]; + double output[2]; + + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); + + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + } + + void test_simulate_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetwork( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With ReLUs, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() 
- 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With ReLUs, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 1 ) ); + } + + // With ReLUs, case 1 and 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 1 ) ); + simulations3.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 0 ) ); + } + } + + void test_simulate_sigmoids() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSigmoids( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // case 1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.6750, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 3.0167, + 0.0001 ) ); + } + + // case 2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.6032, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.5790, + 0.0001 ) ); + } + + // case 3 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 1 ) ); + simulations3.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.5045, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.1957, + 0.0001 ) ); + } + } + + void test_simulate_round() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithRound( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Round, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + 
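+        // A note on the access pattern used throughout these checks (an
+        // editor's sketch, not part of the original test): the assertions
+        // below repeatedly read sample i of output neuron j from the last
+        // layer's stored simulations. A hypothetical helper such as
+        //
+        //   auto lastLayerSim = [&]( unsigned neuron, unsigned sample ) {
+        //       return ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )
+        //                       ->getSimulations() ) )
+        //           .get( neuron )
+        //           .get( sample );
+        //   };
+        //
+        // would reduce each assertion to
+        //   TS_ASSERT( FloatUtils::areEqual( lastLayerSim( 0, i ), 1 ) );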
TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Round, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 2.1 ) ); + simulations2.append( Vector( simulationSize, 1.4 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 2, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -4, + 0.0001 ) ); + } + + // With Round, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 2.1 ) ); + simulations3.append( Vector( simulationSize, 1.6 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -12, + 0.0001 ) ); + } + } + + void test_simulate_sign() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSign( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Sign, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Sign, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, -2 ) ); + simulations2.append( Vector( simulationSize, -2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Sign, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, -1 ) ); + simulations3.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -4 ) ); + } + } + + void test_simulate_abs() + { + 
NLR::NetworkLevelReasoner nlr; + + populateNetworkWithAbs( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Abs, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Abs, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, -2 ) ); + simulations2.append( Vector( simulationSize, -2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Abs, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 1 ) ); + simulations3.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 4 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 10 ) ); + } + } + + void test_simulate_leaky_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithLeakyRelu( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Leaky ReLU, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Leaky ReLU, case 1 (alpha=0.1) + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.9, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 0.57, + 0.0001 ) ); + } + + // With Leaky ReLU, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 1 ) ); + simulations3.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < 
simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -0.04, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -0.76, + 0.0001 ) ); + } + } + + void test_simulate_max() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithMax( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Max, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -3 ) ); + } + + // With Max, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, -3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -18 ) ); + } + + // With Max, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, -3 ) ); + simulations3.append( Vector( simulationSize, 3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -5 ) ); + } + } + + void test_simulate_softmax() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSoftmax( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Softmax, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.2999, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.4001, + 0.0001 ) ); + } + + // With Softmax, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, -3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.1192, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.7615, + 0.0001 ) ); + } + + // With Softmax, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, -3 ) ); + simulations3.append( Vector( simulationSize, 3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( 
FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.1206, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.7588, + 0.0001 ) ); + } + } + + void test_simulate_bilinear() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithBilinear( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Bilinear, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0 ) ); + } + + // With Bilinear, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 0.1 ) ); + simulations2.append( Vector( simulationSize, -0.3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 2.8304, + 0.0001 ) ); + } + + // With Bilinear, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, -0.3 ) ); + simulations3.append( Vector( simulationSize, 0.3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.0912, + 0.0001 ) ); + } + } + + void test_simulate_non_consecutive_layers() + { + NLR::NetworkLevelReasoner nlr; + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + nlr.addLayerDependency( 0, 1 ); + nlr.addLayerDependency( 1, 2 ); + nlr.addLayerDependency( 2, 3 ); + nlr.addLayerDependency( 0, 3 ); + nlr.addLayerDependency( 3, 4 ); + nlr.addLayerDependency( 0, 4 ); + nlr.addLayerDependency( 4, 5 ); + + // Set the weights and relus + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, 2 ); + nlr.setWeight( 2, 2, 3, 1, -2 ); + nlr.setWeight( 0, 1, 3, 1, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 0, 0, 4, 2 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 2, 5, 0, 1 ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // Simulate1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 1 ) ); + simulations1.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + TS_ASSERT( 
FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 2 ) ); + + // Simulate2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, -1 ) ); + simulations2.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0 ) ); + } + + void test_simulate_abs_and_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithAbsAndRelu( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // Simulate1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 1 ) ); + simulations1.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 2 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2 ) ); + } + + // Simulate2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 4 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + } + + void test_simulate_round_and_sign() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithRoundAndSign( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Round/Sign, case 1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 1.6 ) ); + simulations1.append( Vector( simulationSize, 1.4 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -2 ) ); + } + + // With Round/Sign, case 2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1.6 ) ); + simulations2.append( Vector( simulationSize, 1.6 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -4 ) ); + } + } + + void test_simulate_leaky_relu_and_sigmoid() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithLeakyReluAndSigmoid( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With LeakyReLU/Sigmoid, case 1 + Vector> simulations1; + simulations1.append( Vector( 
simulationSize, 1 ) );
+        simulations1.append( Vector<double>( simulationSize, 1 ) );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) );
+
+        for ( unsigned i = 0; i < simulationSize; ++i )
+        {
+            TS_ASSERT( FloatUtils::areEqual(
+                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+                    .get( 0 )
+                    .get( i ),
+                0.7109,
+                0.0001 ) );
+            TS_ASSERT( FloatUtils::areEqual(
+                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+                    .get( 1 )
+                    .get( i ),
+                1.4602,
+                0.0001 ) );
+        }
+
+        // With LeakyReLU/Sigmoid, case 2
+        Vector<Vector<double>> simulations2;
+        simulations2.append( Vector<double>( simulationSize, 1 ) );
+        simulations2.append( Vector<double>( simulationSize, 2 ) );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) );
+
+        for ( unsigned i = 0; i < simulationSize; ++i )
+        {
+            TS_ASSERT( FloatUtils::areEqual(
+                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+                    .get( 0 )
+                    .get( i ),
+                0.4013,
+                0.0001 ) );
+            TS_ASSERT( FloatUtils::areEqual(
+                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+                    .get( 1 )
+                    .get( i ),
+                0.6508,
+                0.0001 ) );
+        }
+    }
+
+    void test_simulate_softmax_and_max()
+    {
+        NLR::NetworkLevelReasoner nlr;
+
+        populateNetworkWithSoftmaxAndMax( nlr );
+
+        unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS );
+
+        // With Softmax/Max, case 1
+        Vector<Vector<double>> simulations1;
+        simulations1.append( Vector<double>( simulationSize, 1 ) );
+        simulations1.append( Vector<double>( simulationSize, -3 ) );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) );
+
+        for ( unsigned i = 0; i < simulationSize; ++i )
+        {
+            TS_ASSERT( FloatUtils::areEqual(
+                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+                    .get( 0 )
+                    .get( i ),
+                -2.9998,
+                0.0001 ) );
+        }
+
+        // With Softmax/Max, case 2
+        Vector<Vector<double>> simulations2;
+        simulations2.append( Vector<double>( simulationSize, -3 ) );
+        simulations2.append( Vector<double>( simulationSize, 3 ) );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) );
+
+        for ( unsigned i = 0; i < simulationSize; ++i )
+        {
+            TS_ASSERT( FloatUtils::areEqual(
+                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+                    .get( 0 )
+                    .get( i ),
+                -1,
+                0.0001 ) );
+        }
+    }
+
+    void test_simulate_relu_and_bilinear()
+    {
+        NLR::NetworkLevelReasoner nlr;
+
+        populateNetworkWithReluAndBilinear( nlr );
+
+        unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS );
+
+        // With ReLU/Bilinear, case 1
+        Vector<Vector<double>> simulations1;
+        simulations1.append( Vector<double>( simulationSize, 1 ) );
+        simulations1.append( Vector<double>( simulationSize, 1 ) );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) );
+
+        for ( unsigned i = 0; i < simulationSize; ++i )
+        {
+            TS_ASSERT( FloatUtils::areEqual(
+                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+                    .get( 0 )
+                    .get( i ),
+                1 ) );
+        }
+
+        // With ReLU/Bilinear, case 2
+        Vector<Vector<double>> simulations2;
+        simulations2.append( Vector<double>( simulationSize, 1 ) );
+        simulations2.append( Vector<double>( simulationSize, 2 ) );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) );
+
+        for ( unsigned i = 0; i < simulationSize; ++i )
+        {
+            TS_ASSERT( FloatUtils::areEqual(
+                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+                    .get( 0 )
+                    .get( i ),
+                0 ) );
+        }
+    }
+
+    void test_interval_arithmetic_bound_propagation_relu_constraints()
+    {
+        NLR::NetworkLevelReasoner nlr;
+        populateNetwork( nlr );
+
+        MockTableau tableau;
+        tableau.getBoundManager().initialize( 14 );
+
+        // Initialize the bounds
+ tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass 
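+        // Editor's worked example for the pass below (following the weights
+        // set in populateNetwork): with x0 in [-3, 1] and x1 in [-1, 2],
+        //   var 2 = x0 + 1       lies in [ -2, 2 ]
+        //   var 4 = 2*x0 - 3*x1  lies in [ -12, 5 ]
+        //   var 6 = x1           lies in [ -1, 2 ]
+        // and the ReLU variables 3, 5 and 7 clip these lower bounds at 0,
+        // which is what expectedBounds2 asserts.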
+        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
+
+        List<Tightening> expectedBounds2( {
+            Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+
+            Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
+
+            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
+            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
+
+            Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ),
+            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ),
+
+            Tightening( 10, -2, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
+            Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ),
+
+            Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ),
+            Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+    }
+
+    void test_interval_arithmetic_bound_propagation_abs_constraints()
+    {
+        NLR::NetworkLevelReasoner nlr;
+
+        // Create the layers
+        nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 );
+        nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 );
+        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 );
+        nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 );
+        nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 );
+
+        // Mark layer dependencies
+        for ( unsigned i = 1; i <= 5; ++i )
+            nlr.addLayerDependency( i - 1, i );
+
+        // Set the weights and biases for the weighted sum layers
+        nlr.setWeight( 0, 0, 1, 0, 1 );
+        nlr.setWeight( 0, 0, 1, 1, 2 );
+        nlr.setWeight( 0, 1, 1, 1, -3 );
+        nlr.setWeight( 0, 1, 1, 2, 1 );
+
+        nlr.setWeight( 2, 0, 3, 0, 1 );
+        nlr.setWeight( 2, 0, 3, 1, -1 );
+        nlr.setWeight( 2, 1, 3, 0, 1 );
+        nlr.setWeight( 2, 1, 3, 1, 1 );
+        nlr.setWeight( 2, 2, 3, 0, -1 );
+        nlr.setWeight( 2, 2, 3, 1, -1 );
+
+        nlr.setWeight( 4, 0, 5, 0, 1 );
+        nlr.setWeight( 4, 0, 5, 1, 1 );
+        nlr.setWeight( 4, 1, 5, 1, 3 );
+
+        nlr.setBias( 1, 0, 1 );
+        nlr.setBias( 3, 1, 2 );
+
+        // Mark the Abs sources
+        nlr.addActivationSource( 1, 0, 2, 0 );
+        nlr.addActivationSource( 1, 1, 2, 1 );
+        nlr.addActivationSource( 1, 2, 2, 2 );
+
+        nlr.addActivationSource( 3, 0, 4, 0 );
+        nlr.addActivationSource( 3, 1, 4, 1 );
+
+        // Variable indexing
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 );
+
+        MockTableau tableau;
+
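+        // 14 variables are needed here: 2 inputs plus 3 + 3 + 2 + 2 + 2
+        // neurons across the remaining layers, matching the indices 0..13
+        // assigned by setNeuronVariable() above.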
tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 8, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), + + Tightening( 10, -3, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 41, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + 
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_sign_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithSign( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 1, Tightening::LB ), Tightening( 3, 1, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 3, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB 
), + + Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, 3 ); + tableau.setUpperBound( 0, 4 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, 4, Tightening::LB ), Tightening( 2, 5, Tightening::UB ), + Tightening( 3, 1, Tightening::LB ), Tightening( 3, 1, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 11, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, 1, Tightening::LB ), Tightening( 10, 3, Tightening::UB ), + Tightening( 11, 1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + + Tightening( 12, 1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, 4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_leaky_relu_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithLeakyRelu( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + 
tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), + Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), + Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ), + Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), + + Tightening( 9, -0.28, Tightening::LB ), Tightening( 9, 10.1, Tightening::UB ), + Tightening( 11, -0.38, Tightening::LB ), Tightening( 11, 9.1, Tightening::UB ), + + Tightening( 12, -0.28, Tightening::LB ), Tightening( 12, 10.1, Tightening::UB ), + Tightening( 13, -1.42, Tightening::LB ), Tightening( 13, 37.4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, -0.2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -3.4, Tightening::LB ), 
Tightening( 8, 7.1, Tightening::UB ), + Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), + + Tightening( 9, -0.34, Tightening::LB ), Tightening( 9, 7.1, Tightening::UB ), + Tightening( 11, -0.32, Tightening::LB ), Tightening( 11, 7.3, Tightening::UB ), + + Tightening( 12, -0.34, Tightening::LB ), Tightening( 12, 7.1, Tightening::UB ), + Tightening( 13, -1.3, Tightening::LB ), Tightening( 13, 29, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_round_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithRound( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, 1.4 ); + tableau.setUpperBound( 0, 1.6 ); + tableau.setLowerBound( 1, -1.4 ); + tableau.setUpperBound( 1, 2.1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), + Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), + Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + + Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), + Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), + + Tightening( 9, -4, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), + Tightening( 11, -7, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), + Tightening( 13, -25, Tightening::LB ), Tightening( 13, 35, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3.1 ); + tableau.setUpperBound( 0, 1.6 ); + tableau.setLowerBound( 1, 1.4 ); + tableau.setUpperBound( 1, 2.1 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + 
tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), + Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), + Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), + Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -16, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + + Tightening( 9, -16, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 11, -15, Tightening::LB ), Tightening( 11, 2, Tightening::UB ), + + Tightening( 12, -16, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -61, Tightening::LB ), Tightening( 13, 7, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_sigmoid_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithSigmoids( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( 
nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 0.5000, Tightening::LB ), Tightening( 3, 0.8808, Tightening::UB ), + Tightening( 5, 0.0067, Tightening::LB ), Tightening( 5, 0.9933, Tightening::UB ), + Tightening( 7, 0.2689, Tightening::LB ), Tightening( 7, 0.7311, Tightening::UB ), + + Tightening( 8, -0.2244, Tightening::LB ), Tightening( 8, 1.6052, Tightening::UB ), + Tightening( 10, 0.3948, Tightening::LB ), Tightening( 10, 2.2244, Tightening::UB ), + + Tightening( 9, 0.4441, Tightening::LB ), Tightening( 9, 0.8327, Tightening::UB ), + Tightening( 11, 0.5974, Tightening::LB ), Tightening( 11, 0.9024, Tightening::UB ), + + Tightening( 12, 0.4441, Tightening::LB ), Tightening( 12, 0.8327, Tightening::UB ), + Tightening( 13, 2.2364, Tightening::LB ), Tightening( 13, 3.5399, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.1192, Tightening::LB ), Tightening( 3, 0.8808, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9933, Tightening::UB ), + Tightening( 7, 0.2689, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), + + Tightening( 8, -0.7616, Tightening::LB ), Tightening( 8, 1.6052, Tightening::UB ), + Tightening( 10, 0.2384, Tightening::LB ), Tightening( 10, 2.6052, Tightening::UB ), + + Tightening( 9, 0.3183, Tightening::LB ), Tightening( 9, 0.8327, Tightening::UB ), + Tightening( 11, 0.5593, Tightening::LB ), Tightening( 11, 0.9312, Tightening::UB ), + + Tightening( 12, 0.3183, Tightening::LB ), Tightening( 12, 0.8327, Tightening::UB ), + Tightening( 13, 1.9963, Tightening::LB ), Tightening( 13, 3.6263, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( 
nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_max_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithMax( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 12 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 10, Tightening::UB ), + Tightening( 9, -8, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), + + Tightening( 11, -10, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + 
Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -8, Tightening::LB ), Tightening( 3, 9, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 9, Tightening::UB ), + Tightening( 7, -5, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 16, Tightening::UB ), + Tightening( 9, -14, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -5, Tightening::LB ), Tightening( 10, 16, Tightening::UB ), + + Tightening( 11, -16, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_softmax_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithSoftmax( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 0.0066, Tightening::LB ), Tightening( 3, 0.9517, Tightening::UB ), + Tightening( 5, 0.0007, Tightening::LB ), Tightening( 5, 0.9909, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ), + + Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ), + Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ), + + Tightening( 9, 0.0240, Tightening::LB ), Tightening( 9, 0.8349, Tightening::UB ), + Tightening( 11, 0.1651, Tightening::LB ), Tightening( 11, 0.9759, Tightening::UB ), + + Tightening( 12, 0.0240, Tightening::LB ), Tightening( 12, 0.8349, Tightening::UB ), + Tightening( 13, 0.5192, Tightening::LB ), Tightening( 13, 3.7629, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + 
TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + + Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 10, 0.0654, Tightening::LB ), Tightening( 10, 2.9933, Tightening::UB ), + + Tightening( 9, 0.0184, Tightening::LB ), Tightening( 9, 0.8678, Tightening::UB ), + Tightening( 11, 0.1322, Tightening::LB ), Tightening( 11, 0.9816, Tightening::UB ), + + Tightening( 12, 0.0184, Tightening::LB ), Tightening( 12, 0.8678, Tightening::UB ), + Tightening( 13, 0.4151, Tightening::LB ), Tightening( 13, 3.8125, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_bilinear_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithBilinear( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 12 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -0.1 ); + tableau.setUpperBound( 0, 0.1 ); + tableau.setLowerBound( 1, -0.1 ); + tableau.setUpperBound( 1, 0.1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 
10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0.9, Tightening::LB ), Tightening( 2, 1.1, Tightening::UB ), + Tightening( 3, -0.5, Tightening::LB ), Tightening( 3, 0.5, Tightening::UB ), + Tightening( 4, -0.3, Tightening::LB ), Tightening( 4, 0.3, Tightening::UB ), + Tightening( 5, -0.3, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ), + + Tightening( 6, -0.55, Tightening::LB ), Tightening( 6, 0.55, Tightening::UB ), + Tightening( 7, -0.09, Tightening::LB ), Tightening( 7, 0.09, Tightening::UB ), + + Tightening( 8, 1.36, Tightening::LB ), Tightening( 8, 2.64, Tightening::UB ), + Tightening( 9, -0.64, Tightening::LB ), Tightening( 9, 0.64, Tightening::UB ), + + Tightening( 10, -1.6896, Tightening::LB ), Tightening( 10, 1.6896, Tightening::UB ), + + Tightening( 11, -1.6896, Tightening::LB ), Tightening( 11, 1.6896, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -0.3 ); + tableau.setUpperBound( 0, 0.1 ); + tableau.setLowerBound( 1, -0.1 ); + tableau.setUpperBound( 1, 0.2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, 0.7, Tightening::LB ), Tightening( 2, 1.1, Tightening::UB ), + Tightening( 3, -0.8, Tightening::LB ), Tightening( 3, 0.9, Tightening::UB ), + Tightening( 4, -0.5, Tightening::LB ), Tightening( 4, 0.5, Tightening::UB ), + Tightening( 5, -0.6, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ), + + Tightening( 6, -0.88, Tightening::LB ), Tightening( 6, 0.99, Tightening::UB ), + Tightening( 7, -0.3, Tightening::LB ), Tightening( 7, 0.3, Tightening::UB ), + + Tightening( 8, 0.82, Tightening::LB ), Tightening( 8, 3.29, Tightening::UB ), + Tightening( 9, -1.29, Tightening::LB ), Tightening( 9, 1.18, Tightening::UB ), + + Tightening( 10, -4.2441, Tightening::LB ), Tightening( 10, 3.8822, Tightening::UB ), + + Tightening( 11, -3.8822, Tightening::LB ), Tightening( 11, 4.2441, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_abs_and_relu_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithAbsAndRelu( nlr ); 
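+ + /* + For reference, the pointwise interval rules this test exercises (a + sketch of the semantics only; the assertions below do not depend on it): + ReLU maps [l, u] to [max( l, 0 ), max( u, 0 )] + Abs maps [l, u] to [0, max( -l, u )] when l < 0 < u, and to + [min( |l|, |u| ), max( |l|, |u| )] otherwise. + Either rule maps [-5, 5] to [0, 5], as the tightenings below reflect. + */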
+ + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -5, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // 
Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), + + Tightening( 10, -10, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_round_and_sign_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithRoundAndSign( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, 1.4 ); + tableau.setUpperBound( 0, 1.6 ); + tableau.setLowerBound( 1, -1.4 ); + tableau.setUpperBound( 1, 2.1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), + Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), + Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + + Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), + Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), + + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 11, -1, 
Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + + Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3.1 ); + tableau.setUpperBound( 0, 1.6 ); + tableau.setLowerBound( 1, 1.4 ); + tableau.setUpperBound( 1, 2.1 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), + Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), + Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), + Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -16, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + + Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_leaky_relu_and_sigmoid_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithLeakyReluAndSigmoid( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); double large = 1000; tableau.setLowerBound( 2, -large ); @@ -4475,366 +7534,922 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 13, -large ); tableau.setUpperBound( 13, large ); - nlr.setTableau( &tableau ); + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( 
nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), + Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), + Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ), + Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), + + Tightening( 9, 0.0573, Tightening::LB ), Tightening( 9, 0.9999, Tightening::UB ), + Tightening( 11, 0.0219, Tightening::LB ), Tightening( 11, 0.9999, Tightening::UB ), + + Tightening( 12, 0.0573, Tightening::LB ), Tightening( 12, 0.9999, Tightening::UB ), + Tightening( 13, 0.1229, Tightening::LB ), Tightening( 13, 3.9996, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, -0.2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 7.1, Tightening::UB ), + Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), + + Tightening( 9, 0.0323, Tightening::LB ), Tightening( 9, 0.9992, Tightening::UB ), + Tightening( 11, 0.0392, Tightening::LB ), Tightening( 11, 0.9993, Tightening::UB ), + + Tightening( 12, 0.0323, Tightening::LB ), Tightening( 12, 0.9992, Tightening::UB ), + Tightening( 13, 0.1498, Tightening::LB ), Tightening( 13, 3.9972, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, 
expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_softmax_and_max_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithSoftmaxAndMax( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 12 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 0.0066, Tightening::LB ), Tightening( 3, 0.9517, Tightening::UB ), + Tightening( 5, 0.0007, Tightening::LB ), Tightening( 5, 0.9909, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ), + + Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ), + Tightening( 9, 0.3192, Tightening::LB ), Tightening( 9, 2.9819, Tightening::UB ), + + Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ), + + Tightening( 11, -2.9819, Tightening::LB ), Tightening( 11, -0.3192, Tightening::UB ) + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + 
Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + + Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 9, 0.0654, Tightening::LB ), Tightening( 9, 2.9933, Tightening::UB ), + + Tightening( 10, 0.0654, Tightening::LB ), Tightening( 10, 2.9933, Tightening::UB ), + + Tightening( 11, -2.9933, Tightening::LB ), Tightening( 11, -0.0654, Tightening::UB ) + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_relu_and_bilinear_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithReluAndBilinear( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 12 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -7, Tightening::LB ), Tightening( 10, 49, Tightening::UB ), + + Tightening( 11, -49, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + 
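+ // For reference: the bilinear tightenings checked after this pass match + // the four-corner product rule, i.e. with x8, x9 in [-2, 7] the corner + // products are { 4, -14, -14, 49 }, giving x10 in [-14, 49] below. + +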
tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, -2, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -14, Tightening::LB ), Tightening( 10, 49, Tightening::UB ), + + Tightening( 11, -49, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_sbt_relus_all_active() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 : [11, 27] + x2.ub = 2x0 + 3x1 : [11, 27] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + Both ReLUs are active, so the bounds survive through the activations: + + x4.lb = 2x0 + 3x1 : [11, 27] + x4.ub = 2x0 + 3x1 : [11, 27] + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = x0 + 2x1 : [6, 16] + x6.ub = x0 + 2x1 : [6, 16] + */ + + List expectedBounds( { + Tightening( 2, 11, Tightening::LB ), + Tightening( 2, 27, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 11, Tightening::LB ), + Tightening( 4, 27, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, 6, Tightening::LB ), + Tightening( 6, 16, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_relus_active_and_inactive() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + 
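+ // A minimal runnable sketch of the rule behind the zeroed bounds derived + // in the comment below (illustrative only; the names are local to this + // test): a concretized ReLU interval clamps negative endpoints to zero, + // so an inactive ReLU with source interval [-19, -3] collapses to [0, 0]. + auto reluLb = []( double lb ) { return lb > 0 ? lb : 0.0; }; + auto reluUb = []( double ub ) { return ub > 0 ? ub : 0.0; }; + TS_ASSERT( reluLb( -19 ) == 0.0 && reluUb( -3 ) == 0.0 ); + +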
populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 30 : [-19, -3] + x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is inactive, bounds get zeroed + Second ReLU is active, bounds survive the activation + + x4.lb = 0 + x4.ub = 0 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 : [-11, -5] + */ + + List expectedBounds( { + Tightening( 2, -19, Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_relus_active_and_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is undecided, bound is concretized. 
+ Coefficient: 12/(12--4) = 12/16 = 0.75 + Second ReLU is active, bounds survive the activation + + x4 range: [0, 12] + x4.lb = 0.75( 2x0 + 3x1 ) - 0.75 * 15 = 1.5x0 + 2.25x1 - 11.25 + x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = 0.5x0 + 1.25x1 - 11.25 + x6.ub = 0.5x0 + 1.25x1 - 8.25 + + x6 range: [2 + 1.25 - 11.25 = -8, 3 + 6.25 - 8.25 = 1] = [-8, 1] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_relus_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. + nlr.setBias( 1, 0, -15 ); + + // However, one of the ReLU's variables has been eliminated + nlr.eliminateVariable( 2, -3 ); - // Initialize + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds( { - Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), - Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), - Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), - - Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + x0: [4, 6] + x1: [1, 5] - Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), - Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), - - Tightening( 9, -4, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), - Tightening( 11, -7, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Layer 1: - Tightening( 12, -4, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), - Tightening( 13, -25, Tightening::LB ), Tightening( 13, 35, Tightening::UB ), - } ); + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] - // Change the current bounds - tableau.setLowerBound( 0, -3.1 ); - tableau.setUpperBound( 0, 1.6 ); - tableau.setLowerBound( 1, 1.4 ); - tableau.setUpperBound( 1, 2.1 ); + First ReLU is inactive (set externally), bounds get zeroed + Second ReLU is active, bounds survive the activation - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - 
tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + x4.lb = 0 + x4.ub = 0 - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + Layer 2: - List expectedBounds2( { - Tightening( 2, -2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), - Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), - Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), - - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), - Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 : [-11, -5] + */ - Tightening( 8, -16, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - - Tightening( 9, -16, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 11, -15, Tightening::LB ), Tightening( 11, 2, Tightening::UB ), + List expectedBounds( { + // x2 does not appear, because it has been eliminated - Tightening( 12, -16, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, -61, Tightening::LB ), Tightening( 13, 7, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - - void test_interval_arithmetic_bound_propagation_sigmoid_constraints() + + void test_sbt_relu_residual1() { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithSigmoids( nlr ); - + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); + nlr.setTableau( &tableau ); + populateNetworkSBTReluResidual1( nlr, tableau ); - // Initialize the bounds tableau.setLowerBound( 0, -1 ); tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 
4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - nlr.setTableau( &tableau ); + /* + Input ranges: - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + x0: [-1, 1] - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + Layer 1: - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + x1.lb = x0 : [-1, 1] + x1.ub = x0 : [-1, 1] + + ReLU is undecided, bound is concretized. + Coefficient: 1/( 1--1 ) = 1/2 = 0.5 - Tightening( 3, 0.5000, Tightening::LB ), Tightening( 3, 0.8808, Tightening::UB ), - Tightening( 5, 0.0067, Tightening::LB ), Tightening( 5, 0.9933, Tightening::UB ), - Tightening( 7, 0.2689, Tightening::LB ), Tightening( 7, 0.7311, Tightening::UB ), + x2.lb = 0.5x0 + x2.ub = 0.5x0 + 0.5 + x2 range: [0, 1] + + Layer 2 (with residual from x0): - Tightening( 8, -0.2244, Tightening::LB ), Tightening( 8, 1.6052, Tightening::UB ), - Tightening( 10, 0.3948, Tightening::LB ), Tightening( 10, 2.2244, Tightening::UB ), + x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1] + x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5] + x3 range: [-1, 2.5] + + ReLU is undecided, bound is concretized. + Coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7. 
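+ (For these relaxations the coefficient is the slope u / ( u - l ) of the + line through ( l, 0 ) and ( u, u ) on the source range [l, u]: here + l = -1, u = 2.5 give 5/7, and the upper bound also picks up the offset + -l * 5/7 = 5/7, as used for x4.ub below.)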
- Tightening( 9, 0.4441, Tightening::LB ), Tightening( 9, 0.8327, Tightening::UB ), - Tightening( 11, 0.5974, Tightening::LB ), Tightening( 11, 0.9024, Tightening::UB ), + x4.lb = 0 + x4.ub = 5/7 ( -1.5x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5] + x4 range: [0, 2.5] - Tightening( 12, 0.4441, Tightening::LB ), Tightening( 12, 0.8327, Tightening::UB ), - Tightening( 13, 2.2364, Tightening::LB ), Tightening( 13, 3.5399, Tightening::UB ), + Layer 3 (with residual from x1): + + x5.lb = 3 ( 0 ) + 3 ( x0 ) + 1 = 3x0 + 1 : [-2, 4] + x5.ub = 3 ( -15/14 x0 + 20/14 ) + 3 ( x0 ) + 1 = -3/14 x0 + 74/14 : [71/14, 77/14 = 5.5] + + x5 range: [-2, 4] + */ + + List expectedBounds( { + Tightening( 1, -1, Tightening::LB ), + Tightening( 1, 1, Tightening::UB ), + Tightening( 2, 0, Tightening::LB ), + Tightening( 2, 1, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 2.5, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 2.5, Tightening::UB ), + Tightening( 5, 2, Tightening::LB ), + Tightening( 5, 5.5, Tightening::UB ), } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_relu_residual2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTReluResidual2( nlr, tableau ); - // Change the current bounds - tableau.setLowerBound( 0, -3 ); + tableau.setLowerBound( 0, -1 ); tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + x0: [-1, 1] - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + Layer 1: - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + x1.lb = x0 : [-1, 1] + x1.ub = x0 : [-1, 1] + + ReLU is undecided, bound is concretized. 
+ Coefficient: 1/( 1--1 ) = 1/2 = 0.5 - Tightening( 3, 0.1192, Tightening::LB ), Tightening( 3, 0.8808, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9933, Tightening::UB ), - Tightening( 7, 0.2689, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), + x2.lb = 0.5x0 + x2.ub = 0.5x0 + 0.5 + x2 range: [0, 1] + + Layer 2 (with residual from x0): - Tightening( 8, -0.7616, Tightening::LB ), Tightening( 8, 1.6052, Tightening::UB ), - Tightening( 10, 0.2384, Tightening::LB ), Tightening( 10, 2.6052, Tightening::UB ), + x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1] + x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5] + x3 range: [-1, 2.5] + + ReLU is undecided, bound is concretized. + Coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7. - Tightening( 9, 0.3183, Tightening::LB ), Tightening( 9, 0.8327, Tightening::UB ), - Tightening( 11, 0.5593, Tightening::LB ), Tightening( 11, 0.9312, Tightening::UB ), + x4.lb = 0 + x4.ub = 5/7 ( -1.5x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5] + x4 range: [0, 2.5] - Tightening( 12, 0.3183, Tightening::LB ), Tightening( 12, 0.8327, Tightening::UB ), - Tightening( 13, 1.9963, Tightening::LB ), Tightening( 13, 3.6263, Tightening::UB ), + Layer 3 (with residual from x0): + + x5.lb = 3 ( 0 ) + 1 ( x0 ) + 1 = 1x0 + 1 : [0, 2] + x5.ub = 3 ( -15/14 x0 + 20/14 ) + 1 ( x0 ) + 1 = -31/14 x0 + 74/14 : [43/14, 105/14 = 7.5] + x5 range: [0, 7.5] + + Layer 4: + x6.lb = 1x0 + 1 : [0, 2] + x6.ub = -31/14 x0 + 74/14 : [43/14, 105/14 = 7.5] + x6 range: [0, 7.5] + */ + + List expectedBounds( { + Tightening( 1, -1, Tightening::LB ), + Tightening( 1, 1, Tightening::UB ), + Tightening( 2, 0, Tightening::LB ), + Tightening( 2, 1, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 2.5, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 2.5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 7.5, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), + Tightening( 6, 7.5, Tightening::UB ), } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - void test_interval_arithmetic_bound_propagation_max_constraints() + void test_sbt_relu_reindex() { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithMax( nlr ); - + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 12 ); + nlr.setTableau( &tableau ); + populateNetworkSBTReluReindex( nlr, tableau ); - // Initialize the bounds tableau.setLowerBound( 0, -1 ); tableau.setUpperBound( 0, 1 ); tableau.setLowerBound( 1, -1 ); tableau.setUpperBound( 1, 1 ); - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 
11, -large ); - tableau.setUpperBound( 11, large ); + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - nlr.setTableau( &tableau ); + /* + Input ranges: - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + x0: [-1, 1] + x1: [-1, 1] - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + Layer 1: - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + x2.lb = x0 + x1 : [-2, 2] + x2.ub = x0 + x1 : [-2, 2] + + x3.lb = x0 - x1 : [-2, 2] + x3.ub = x0 - x1 : [-2, 2] - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 10, Tightening::UB ), - Tightening( 9, -8, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Both ReLUs are undecided, bounds are concretized. + Coefficient: 2/( 2--2 ) = 2/4 = 0.5 - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), + x4.lb = 0.5 ( x0 + x1 ) = 0.5x0 + 0.5x1 + x4.ub = 0.5 ( x0 + x1 ) + 1 = 0.5x0 + 0.5x1 + 1 + x4 range: [0, 2] + + x5.lb = 0.5 ( x0 - x1 ) = 0.5x0 - 0.5x1 + x5.ub = 0.5 ( x0 - x1 ) + 1 = 0.5x0 - 0.5x1 + 1 + x5 range: [0, 2] - Tightening( 11, -10, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), - } ); + Layer 2: + + x6.lb = 1 ( 0.5x0 + 0.5x1 ) + 1 ( 0.5x0 - 0.5x1 ) = x0 : [-1, 1] + x6.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) + 1 ( 0.5x0 - 0.5x1 + 1 ) = x0 + 2 : [1, 3] + x6 range: [-1, 3] + + x7.lb = 1 ( 0.5x0 + 0.5x1 ) - 1 ( 0.5x0 - 0.5x1 + 1 ) = x1 - 1 : [-2, 0] + x7.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) - 1 ( 0.5x0 - 0.5x1 ) = x1 + 1 : [0, 2] + x7 range: [-2, 2] + + Both ReLUs are undecided, bounds are concretized. 
+ Coefficient (first ReLU, lower): 1/( 1--1 ) = 1/2 = 0.5 + Coefficient (first ReLU, upper): 1 (propagated as is) + Coefficient (second ReLU, lower): 0 (bound is zero) + Coefficient (second ReLU, upper): 2/( 2--2 ) = 2/4 = 0.5 + + x8.lb = 0.5 ( x0 ) = 0.5x0 + x8.ub = x0 + 2 + x8 range: [0, 3] + + x9.lb = 0 + x9.ub = 0.5 ( x1 + 1 ) + 1 = 0.5x1 + 1.5 + x9 range: [0, 2] + + Layer 3: + + x10.lb = 1 ( 0.5x0 ) + 1 ( 0 ) + 1 = 0.5x0 + 1 : [0.5, 1.5] + x10.ub = 1 ( x0 + 2 ) + 1 ( 0.5x1 + 1.5 ) + 1 = x0 + 0.5x1 + 4.5 : [3, 6] + x10 range: [0.5, 6] + + x11.lb = 0.5x1 - 0.5 + x11.ub = 0.5x1 + 1.5 + x11 range: [0, 2] + + */ + + List expectedBounds( + { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, 0.5, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 2, Tightening::UB ) + + } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_abs_all_positive() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - 
Tightening( 3, -8, Tightening::LB ), Tightening( 3, 9, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, -2, Tightening::LB ), Tightening( 6, 9, Tightening::UB ), - Tightening( 7, -5, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); - Tightening( 8, -5, Tightening::LB ), Tightening( 8, 16, Tightening::UB ), - Tightening( 9, -14, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - - Tightening( 10, -5, Tightening::LB ), Tightening( 10, 16, Tightening::UB ), - - Tightening( 11, -16, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), - } ); + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_softmax_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithSoftmax( nlr ); - - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; - double large = 1000; tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); @@ -4845,184 +8460,109 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - nlr.setTableau( &tableau ); + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); - // Initialize + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - - Tightening( 3, 0.0066, Tightening::LB ), Tightening( 3, 0.9517, Tightening::UB ), - Tightening( 5, 0.0007, Tightening::LB ), Tightening( 5, 0.9909, Tightening::UB ), - Tightening( 7, 0.0024, 
Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ), + x0: [4, 6] + x1: [1, 5] - Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ), - Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ), - - Tightening( 9, 0.0240, Tightening::LB ), Tightening( 9, 0.8349, Tightening::UB ), - Tightening( 11, 0.1651, Tightening::LB ), Tightening( 11, 0.9759, Tightening::UB ), + Layer 1: - Tightening( 12, 0.0240, Tightening::LB ), Tightening( 12, 0.8349, Tightening::UB ), - Tightening( 13, 0.5192, Tightening::LB ), Tightening( 13, 3.7629, Tightening::UB ), - } ); + x2.lb = 2x0 + 3x1 : [11, 27] + x2.ub = 2x0 + 3x1 : [11, 27] - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + Both absolute values positive, bounds survive through activations: - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + x4.lb = 2x0 + 3x1 : [11, 27] + x4.ub = 2x0 + 3x1 : [11, 27] - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + Layer 2: - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + x6.lb = x0 + 2x1 : [6, 16] + x6.ub = x0 + 2x1 : [6, 16] + */ - Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), - Tightening( 10, 0.0654, Tightening::LB ), Tightening( 10, 2.9933, Tightening::UB ), - - Tightening( 9, 0.0184, Tightening::LB ), Tightening( 9, 0.8678, Tightening::UB ), - Tightening( 11, 0.1322, Tightening::LB ), Tightening( 11, 0.9816, Tightening::UB ), + List expectedBounds( { + Tightening( 2, 11, Tightening::LB ), + Tightening( 2, 27, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), - Tightening( 12, 0.0184, Tightening::LB ), Tightening( 12, 0.8678, Tightening::UB ), - Tightening( 13, 0.4151, Tightening::LB ), Tightening( 
13, 3.8125, Tightening::UB ), + Tightening( 4, 11, Tightening::LB ), + Tightening( 4, 27, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, 6, Tightening::LB ), + Tightening( 6, 16, Tightening::UB ), } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - - void test_interval_arithmetic_bound_propagation_bilinear_constraints() + + void test_sbt_abs_positive_and_negative() { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; - populateNetworkWithBilinear( nlr ); - - MockTableau tableau; - tableau.getBoundManager().initialize( 12 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -0.1 ); - tableau.setUpperBound( 0, 0.1 ); - tableau.setLowerBound( 1, -0.1 ); - tableau.setUpperBound( 1, 0.1 ); + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - nlr.setTableau( &tableau ); + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); - List expectedBounds( { - Tightening( 2, 0.9, Tightening::LB ), Tightening( 2, 1.1, Tightening::UB ), - Tightening( 3, -0.5, Tightening::LB ), Tightening( 3, 0.5, Tightening::UB ), - Tightening( 4, -0.3, Tightening::LB ), Tightening( 4, 0.3, Tightening::UB ), - Tightening( 5, -0.3, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ), - - Tightening( 6, -0.55, Tightening::LB ), Tightening( 6, 0.55, Tightening::UB ), - Tightening( 7, -0.09, Tightening::LB ), Tightening( 7, 0.09, Tightening::UB ), + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - Tightening( 8, 1.36, Tightening::LB ), Tightening( 8, 2.64, Tightening::UB ), - Tightening( 9, -0.64, Tightening::LB ), Tightening( 9, 0.64, Tightening::UB ), - - Tightening( 10, -1.6896, Tightening::LB ), Tightening( 10, 
1.6896, Tightening::UB ), - - Tightening( 11, -1.6896, Tightening::LB ), Tightening( 11, 1.6896, Tightening::UB ), - } ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - // Change the current bounds - tableau.setLowerBound( 0, -0.3 ); - tableau.setUpperBound( 0, 0.1 ); - tableau.setLowerBound( 1, -0.1 ); - tableau.setUpperBound( 1, 0.2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); @@ -5034,59 +8574,114 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - // Initialize + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); + + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds2( { - Tightening( 2, 0.7, Tightening::LB ), Tightening( 2, 1.1, Tightening::UB ), - Tightening( 3, -0.8, Tightening::LB ), Tightening( 3, 0.9, Tightening::UB ), - Tightening( 4, -0.5, Tightening::LB ), Tightening( 4, 0.5, Tightening::UB ), - Tightening( 5, -0.6, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ), - - Tightening( 6, -0.88, Tightening::LB ), Tightening( 6, 0.99, Tightening::UB ), - Tightening( 7, -0.3, Tightening::LB ), Tightening( 7, 0.3, Tightening::UB ), + x0: [4, 6] + x1: [1, 5] - Tightening( 8, 0.82, Tightening::LB ), Tightening( 8, 3.29, Tightening::UB ), - Tightening( 9, -1.29, Tightening::LB ), Tightening( 9, 1.18, Tightening::UB ), - - Tightening( 10, -4.2441, Tightening::LB ), Tightening( 10, 3.8822, Tightening::UB ), - - Tightening( 11, -3.8822, Tightening::LB ), Tightening( 11, 4.2441, Tightening::UB ), + Layer 1: + + x2.lb = 2x0 + 3x1 - 30 : [-19, -3] + x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First absolute value is negative, bounds get flipped + Second absolute value is positive, bounds survive the activation + + x4.lb = -2x0 -3x1 + 30 : [3, 19] + x4.ub = -2x0 -3x1 + 30 : [3, 19] + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - 3x0 - 4x1 + 30 : [-8, 14] + x6.ub = - 3x0 - 4x1 + 30 : [-8, 14] + */ + + List expectedBounds( { + Tightening( 2, -19, Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 19, Tightening::UB ), 
+ Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, 14, Tightening::UB ), } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - void test_interval_arithmetic_bound_propagation_abs_and_relu_constraints() + void test_sbt_absolute_values_positive_and_not_fixed() { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithAbsAndRelu( nlr ); + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; - double large = 1000; tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); @@ -5097,58 +8692,116 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - nlr.setTableau( &tableau ); + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); - // Initialize + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds( { - Tightening( 
2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + x0: [4, 6] + x1: [1, 5] - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Layer 1: - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] - Tightening( 10, -5, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + First absolute value is undecided, bounds are concretized. + Second absolute value is active, bounds survive the activation - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + x4 range: [0, 12] + x4.lb = 0 + x4.ub = 12 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 + 12 : [ 1, 7] + + x6 range: [-11, 7] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, 7, Tightening::UB ), } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_absolute_values_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; 
tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); @@ -5160,66 +8813,114 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - // Initialize + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. + nlr.setBias( 1, 0, -15 ); + + // However, the weighted sum variable has been eliminated + nlr.eliminateVariable( 2, -3 ); + + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + x0: [4, 6] + x1: [1, 5] - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), + Layer 1: - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + x2 is eliminated, everything set to -3 - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] - Tightening( 10, -10, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + Second absolute value is positive, bounds survive the activation - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), + x4: all set to 3 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 + 3 : [-8, -2] + x6.ub = - x0 - x1 + 3 : [-8, -2] + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, -2, Tightening::UB ), } ); - + + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - void test_interval_arithmetic_bound_propagation_round_and_sign_constraints() + void test_sbt_signs_positive_and_not_fixed() { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + 
NLR::NetworkLevelReasoner nlr; - populateNetworkWithRoundAndSign( nlr ); - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); - // Initialize the bounds - tableau.setLowerBound( 0, 1.4 ); - tableau.setUpperBound( 0, 1.6 ); - tableau.setLowerBound( 1, -1.4 ); - tableau.setUpperBound( 1, 2.1 ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; - double large = 1000; tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); @@ -5230,58 +8931,119 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - nlr.setTableau( &tableau ); + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); - // Initialize + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First sign is undecided, bounds are concretized. + Second sign is active, bounds become constant 1 + Coefficient (first Sign, lower): 2/12 = 1/6. + Coefficient (first Sign, upper): -2/-4 = 1/2. 
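+
+               (In general, for an undecided Sign over [l, u], the relaxation used
+                here appears to be the line ( 2/u ) x - 1 through ( 0, -1 ) and
+                ( u, 1 ) from below, and the line ( -2/l ) x + 1 through ( l, -1 )
+                and ( 0, 1 ) from above; with l = -4 and u = 12 these yield the
+                coefficients 1/6 and 1/2 above. A Python sanity sketch, not part
+                of the test:
+                ---
+                l, u = -4, 12
+                sign = lambda x: 1 if x >= 0 else -1
+                for x in range( l, u + 1 ):
+                    assert ( 2 / u ) * x - 1 <= sign( x ) <= ( -2 / l ) * x + 1
+                ---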
+ + x4 range: [-1, 1] + x4.lb = 1/6 ( 2x0 + 3x1 - 15 ) - 1 = 2/6 x0 + 3/6 x1 - 21/6 + x4.ub = 1/2 ( 2x0 + 3x1 - 15 ) + 1 = x0 + 1.5x1 - 6.5 + + x5 range: [1, 1] + x5.lb = 1 + x5.ub = 1 + + Layer 2: + + x6.lb = 1 ( 2/6 x0 + 3/6 x1 - 21/6 ) - 1 ( 1 ) = 2/6 x0 + 3/6 x1 - 27/6 : [-16/6, 0] + x6.ub = 1 ( x0 + 1.5x1 - 6.5 ) - 1 ( 1 ) = x0 + 1.5x1 - 7.5 : [-2, 6] + + x6 range: [-8/3, 6] + */ List expectedBounds( { - Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), - Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), - Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), - - Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), + Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2.6667, Tightening::LB ), + Tightening( 6, 6, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_signs_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); - Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), - Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), - - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), - } ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - // Change the current bounds - tableau.setLowerBound( 0, -3.1 ); - tableau.setUpperBound( 0, 1.6 ); - tableau.setLowerBound( 1, 1.4 ); - tableau.setUpperBound( 1, 2.1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose 
bounds for neurons except inputs + double large = 1000000; tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); @@ -5293,507 +9055,544 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - // Initialize + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. + nlr.setBias( 1, 0, -15 ); + + // However, the weighted sum variable has been eliminated + nlr.eliminateVariable( 2, -3 ); + + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds2( { - Tightening( 2, -2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), - Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), - Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), - - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), - Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + x0: [4, 6] + x1: [1, 5] - Tightening( 8, -16, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + Layer 1: - Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + x2 is eliminated, everything set to -3 + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First sign is negative, bounds become constant -1 + Second sign is positive, bounds become constant 1 + + x4: all set to -1 + + x5: all set to 1 + + Layer 2: + + x6.lb = 1 ( -1 ) - 1 ( 1 ) = -2 + x6.ub = 1 ( -1 ) - 1 ( 1 ) = -2 + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, -1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), + Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), + Tightening( 6, -2, Tightening::UB ), } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - void test_interval_arithmetic_bound_propagation_leaky_relu_and_sigmoid_constraints() + void test_sbt_leaky_relu() { NLR::NetworkLevelReasoner nlr; - 
populateNetworkWithLeakyReluAndSigmoid( nlr ); - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); + nlr.setTableau( &tableau ); + populateNetworkSBTLeakyReLU( nlr, tableau ); // alpha = 0.2 - // Initialize the bounds tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 2 ); + tableau.setUpperBound( 0, 1 ); tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + tableau.setUpperBound( 1, 1 ); - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - nlr.setTableau( &tableau ); + /* + Input ranges: - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + x0: [-1, 1] + x1: [-1, 1] - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + Layer 1: - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), - Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), - Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + x2.lb = x0 + x1 : [-2, 2] + x2.ub = x0 + x1 : [-2, 2] + + x3.lb = x0 - x1 : [-2, 2] + x3.ub = x0 - x1 : [-2, 2] - Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ), - Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), + Both LeakyReLUs are undecided, bounds are concretized. 
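+
+            (The concretization below appears to follow this pattern: for an
+             undecided LeakyReLU with slope alpha over [l, u], the symbolic lower
+             bound passes through unchanged, since LeakyReLU(x) >= x whenever
+             alpha <= 1, while the upper bound is the chord through ( l, alpha*l )
+             and ( u, u ), i.e. coefficient ( u - alpha*l )/( u - l ) and bias
+             ( alpha - 1 ) * u * l / ( u - l ). A Python sanity sketch, not part
+             of the test:
+             ---
+             alpha, l, u = 0.2, -2.0, 2.0
+             leaky = lambda x: x if x >= 0 else alpha * x
+             coeff = ( u - alpha * l ) / ( u - l )      # 0.6
+             bias = ( alpha - 1 ) * u * l / ( u - l )   # 0.8
+             xs = [ l + i * ( u - l ) / 100 for i in range( 101 ) ]
+             assert all( x <= leaky( x ) <= coeff * x + bias + 1e-9 for x in xs )
+             ---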
+ Coefficient: ( 2 - 0.2*-2 )/( 2--2 ) = 2.4/4 = 0.6 + Bias: ( 0.2 - 1 ) * 2 * -2 / ( 2--2 ) = 0.8 - Tightening( 9, 0.0573, Tightening::LB ), Tightening( 9, 0.9999, Tightening::UB ), - Tightening( 11, 0.0219, Tightening::LB ), Tightening( 11, 0.9999, Tightening::UB ), - - Tightening( 12, 0.0573, Tightening::LB ), Tightening( 12, 0.9999, Tightening::UB ), - Tightening( 13, 0.1229, Tightening::LB ), Tightening( 13, 3.9996, Tightening::UB ), - } ); + x4.lb = x0 + x1 + x4.ub = 0.6 ( x0 + x1 ) + 0.8 = 0.6x0 + 0.6x1 + 0.8 + x4 range: [-0.4, 2] + + x5.lb = x0 - x1 + x5.ub = 0.6 ( x0 - x1 ) + 0.8 = 0.6x0 - 0.6x1 + 0.8 + x5 range: [-0.4, 2] + + Layer 2: - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + x6.lb = 1 ( x0 + x1 ) + 1 ( x0 - x1 ) = 2x0 : [-2, 2] + x6.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) + 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 1.2x0 + 1.6 : [0.4, 2.8] + x6 range: [-2, 2.8] + + x7.lb = 1 ( x0 + x1 ) - 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2] + x7.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) - 1 ( x0 - x1 ) = -0.4x0 + 1.6x1 + 0.8 : [-1.2, 2.8] + x7 range: [-2.8, 2.8] + + Both LeakyReLUs are undecided, bounds are concretized. + Coefficient (first LeakyReLU): ( 2.8 - 0.2*-2 )/( 2.8--2 ) = 3.2/4.8 = 10/15 + Bias (first LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2 / ( 2.8--2 ) = 14/15 + + Coefficient (second LeakyReLU): ( 2.8 - 0.2*-2.8 )/( 2.8--2.8 ) = 3.36/5.6 = 0.6 + Bias (second LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2.8 / ( 2.8--2.8 ) = 1.12 + + x8.lb = 2x0 + x8.ub = 10/15 ( 1.2x0 + 1.6 ) + 14/15 = 0.8x0 + 2 + x8 range: [-0.4, 2.8] + + x9.lb = 0.4x0 + 1.6x1 - 0.8 + x9.ub = 0.6 ( -0.4x0 + 1.6x1 + 0.8 ) + 1.12 = -0.24 x0 + 0.96 x1 + 1.6 + x9 range: [-0.56, 2.8] - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + Layer 3: - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + x10.lb = 1 ( 0.4x0 + 1.6x1 - 0.8 ) + 1 ( 2x0 ) + 1 = 2.4x0 + 1.6x1 + 0.2 : [-3.8, 4.2] + x10.ub = 1 ( -0.24 x0 + 0.96 x1 + 1.6 ) + 1 ( 0.8x0 + 2 ) + 1 = 0.56x0 + 0.96x1 + 4.6 : [3.08, 6.12] + x10 range: [-3.8, 6.12] + + x11.lb = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2] + x11.ub = 0.6 ( -0.4x0 + 1.6x1 + 0.8 ) + 1.12 = -0.24 x0 + 0.96 x1 + 1.6 : [0.4, 2.8] + x11 range: [-2.8, 2.8] + + */ - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + List expectedBounds( + { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( 
nlr.intervalArithmeticBoundPropagation() ); + Tightening( 4, -0.4, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -0.4, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2.8, Tightening::UB ), + Tightening( 7, -2.8, Tightening::LB ), Tightening( 7, 2.8, Tightening::UB ), - Tightening( 3, -0.2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -0.4, Tightening::LB ), Tightening( 8, 2.8, Tightening::UB ), + Tightening( 9, -0.56, Tightening::LB ), Tightening( 9, 2.8, Tightening::UB ), - Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 7.1, Tightening::UB ), - Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), - - Tightening( 9, 0.0323, Tightening::LB ), Tightening( 9, 0.9992, Tightening::UB ), - Tightening( 11, 0.0392, Tightening::LB ), Tightening( 11, 0.9993, Tightening::UB ), + Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 6.12, Tightening::UB ), + Tightening( 11, -2.8, Tightening::LB ), Tightening( 11, 2.8, Tightening::UB ) - Tightening( 12, 0.0323, Tightening::LB ), Tightening( 12, 0.9992, Tightening::UB ), - Tightening( 13, 0.1498, Tightening::LB ), Tightening( 13, 3.9972, Tightening::UB ), - } ); + } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - - void test_interval_arithmetic_bound_propagation_softmax_and_max_constraints() + + void test_sbt_sigmoids_and_round() { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithSoftmaxAndMax( nlr ); - + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 12 ); + nlr.setTableau( &tableau ); + populateNetworkSBTSigmoidsAndRound( nlr, tableau ); - // Initialize the bounds tableau.setLowerBound( 0, -1 ); tableau.setUpperBound( 0, 1 ); tableau.setLowerBound( 1, -1 ); tableau.setUpperBound( 1, 1 ); - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - nlr.setTableau( &tableau ); + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - // Initialize - 
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + // Layer 1 + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 0 ), -2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 0 ), 2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 1 ), -2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 1 ), 2, 0.00001 ) ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + // Layer 2 + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 0 ), 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 0 ), 0.8807, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 1 ), 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 1 ), 0.8807, 0.0001 ) ); - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - - Tightening( 3, 0.0066, Tightening::LB ), Tightening( 3, 0.9517, Tightening::UB ), - Tightening( 5, 0.0007, Tightening::LB ), Tightening( 5, 0.9909, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ), + // Layer 3 + /* + Double-check with Python + --- + from math import exp as e + def g(x): + return 1 / (1 + e(-x)) - Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ), - Tightening( 9, 0.3192, Tightening::LB ), Tightening( 9, 2.9819, Tightening::UB ), - - Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ), - - Tightening( 11, -2.9819, Tightening::LB ), Tightening( 11, -0.3192, Tightening::UB ) - } ); + def g_prime(x): + return g(x) * (1 - g(x)) - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + def lam(l, u): + return (g(u) - g(l)) / (u - l) - // Change the current bounds - tableau.setLowerBound( 0, -3 ); + def lam_prime(l, u): + return min(g_prime(l), g_prime(u)) + + l3 = l4 = -2 + u3 = u4 = 2 + l5 = l6 = g(-2) + u5 = u6 = g(2) + lambda7 = lam(l3, u3) + lambda7_prime = lam_prime(l3, u3) + lambda8 = lam(l4, u4) + lambda8_prime = lam_prime(l4, u4) + x7_l = lambda7_prime * (-2) + g(-2) + g(-2) - lambda7_prime * (-2 + -2) + x7_u = lambda7_prime * (2) + g(2) + g(2) -lambda7_prime * (2 + 2) + x8_l = lambda8_prime * (-2) + g(-2) - g(2) - lambda8_prime * (-2 - 2) + x8_u = lambda8_prime * (2) + g(2) - g(-2) -lambda8_prime * (2 - -2) + print(x7_l) + print(x7_u) + print(x8_l) + print(x8_u) + --- + [output]: + 0.4483930148512481 + 1.5516069851487517 + -0.5516069851487517 + 0.5516069851487517 + */ + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 0 ), 0.4483, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 0 ), 1.5516, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 1 ), -0.5516, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 1 ), 0.5516, 0.0001 ) ); + + // Layer 4 + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 0 ), 0 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 0 ), 2 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 1 ), -1 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 1 ), 1 ); + } + + void test_sbt_max_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + 
NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTMax( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); tableau.setUpperBound( 0, 1 ); tableau.setLowerBound( 1, -1 ); tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - // Initialize + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + x0: [-1, 1] + x1: [-1, 2] + + Layer 1: + + x2.lb = x0 + x1 : [-2, 3] + x2.ub = x0 + x1 : [-2, 3] + + x3.lb = x0 - x1 : [-3, 2] + x3.ub = x0 - x1 : [-3, 2] + + Both ReLUs are undecided, bounds are concretized. + Coefficient (first ReLU): 3/( 3--2 ) = 3/5 = 0.6 + Coefficient (second ReLU): 2/( 2--3 ) = 2/5 = 0.4 + + x4.lb = 0.6 ( x0 + x1 ) = 0.6x0 + 0.6x1 + x4.ub = 0.6 ( x0 + x1 ) + 1.2 = 0.6x0 + 0.6x1 + 1.2 + x4 range: [0, 3] + + x5.lb = 0.4 ( x0 - x1 ) = 0.4x0 - 0.4x1 + x5.ub = 0.4 ( x0 - x1 ) + 1.2 = 0.4x0 - 0.4x1 + 1.2 + x5 range: [0, 2] + + Max is not fixed because x5.lb <= x4.ub and x4.lb <= x5.ub + Max inherits lower bound from x4, and its upper bound is constant 3. 
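+
+            (Both choices are sound: max( x4, x5 ) >= x4 holds unconditionally,
+             so x4's symbolic lower bound remains valid for the Max output, and
+             the upper bound is the larger of the two concrete upper bounds,
+             max( 3, 2 ) = 3.)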
+ + x6.lb = 0.6x0 + 0.6x1 : [-1.2, 1.8] + x6.ub = 3 : [3, 3] + x6 range: [-1.2, 3] + + Layer 3: + + x7.lb = 2 ( 0.6x0 + 0.6x1 ) = 1.2x0 + 1.2x1 : [-2.4, 3.6] + x7.ub = 2 ( 3 ) = 6 : [6, 6] + x7 range: [-2.4, 6] + */ + + List expectedBounds( { + Tightening( 2, -2, Tightening::LB ), + Tightening( 2, 3, Tightening::UB ), + Tightening( 3, -3, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), + Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -2.4, Tightening::LB ), + Tightening( 7, 6, Tightening::UB ), - Tightening( 8, -1, 
Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + /* + Input ranges: - Tightening( 10, -7, Tightening::LB ), Tightening( 10, 49, Tightening::UB ), - - Tightening( 11, -49, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), - } ); + x0: [1, 2] + x1: [-3, -2] - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + Layer 1: - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + x2.lb = x0 + x1 : [-2, 0] + x2.ub = x0 + x1 : [-2, 0] - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + x3.lb = x0 - x1 : [3, 5] + x3.ub = x0 - x1 : [3, 5] - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); + First ReLU is negative, bounds become constant 0 + Second ReLU is positive, bounds survive the activation - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + x4: all set to 0 + + x5.lb = x0 - x1 : [3, 5] + x5.ub = x0 - x1 : [3, 5] + + Max is fixed because x5.lb > x4.ub; it inherits x5's bounds + + x6.lb = x0 - x1 : [3, 5] + x6.ub = x0 - x1 : [3, 5] - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + Layer 3: - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + x7.lb = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10] + x7.ub = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10] + */ - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, -2, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + List expectedBounds( { + Tightening( 2, -2, Tightening::LB ), + Tightening( 2, 0, Tightening::UB ), + Tightening( 3, 3, Tightening::LB ), + Tightening( 3, 5, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 3, Tightening::LB ), + Tightening( 5, 5, Tightening::UB ), + Tightening( 6, 3, Tightening::LB ), + Tightening( 6, 5, Tightening::UB ), + Tightening( 7, 6, Tightening::LB ), + Tightening( 7, 10, Tightening::UB ), - Tightening( 10, -14, Tightening::LB ), Tightening( 10, 49, Tightening::UB ), - - Tightening( 11, -49, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT( boundsEqual( 
bounds, expectedBounds ) ); } - - void test_sbt_relus_all_active() + + void test_sbt_softmax1() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); - populateNetworkSBTRelu( nlr, tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + } - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] + void test_sbt_softmax2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + { + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); - Layer 1: + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.000001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.000001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.000001 ); - x2.lb = 2x0 + 3x1 : [11, 27] - x2.ub = 2x0 + 3x1 : [11, 27] + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] + /* + Input ranges: - Both ReLUs active, bound survive through activations: + x0: [1, 1.000001] + x1: [1, 1.000001] + x2: [1, 1.000001] + */ + List expectedBounds( { Tightening( 3, 2, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 0, Tightening::UB ), + Tightening( 6, 0.2595, Tightening::LB ), + Tightening( 6, 0.2595, Tightening::UB ), + Tightening( 7, 0.7054, Tightening::LB ), + Tightening( 7, 0.7054, Tightening::UB ), + Tightening( 8, 0.0351, Tightening::LB ), + Tightening( 8, 0.0351, Tightening::UB ), + Tightening( 9, 1, Tightening::LB ), + Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), + Tightening( 10, -1, Tightening::UB ) - x4.lb = 2x0 + 3x1 : [11, 27] - x4.ub = 2x0 + 3x1 : [11, 27] + } ); - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + { + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "er" ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); - Layer 2: + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.000001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.000001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.000001 ); - x6.lb = x0 + 2x1 : [6, 16] - x6.ub = x0 + 2x1 : [6, 16] - */ + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - List expectedBounds( { - Tightening( 2, 11, Tightening::LB ), - Tightening( 2, 27, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), + /* + Input 
ranges: - Tightening( 4, 11, Tightening::LB ), - Tightening( 4, 27, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), + x0: [1, 1.000001] + x1: [1, 1.000001] + x2: [1, 1.000001] + */ + List expectedBounds( { Tightening( 3, 2, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 0, Tightening::UB ), + Tightening( 6, 0.2595, Tightening::LB ), + Tightening( 6, 0.2595, Tightening::UB ), + Tightening( 7, 0.7054, Tightening::LB ), + Tightening( 7, 0.7054, Tightening::UB ), + Tightening( 8, 0.0351, Tightening::LB ), + Tightening( 8, 0.0351, Tightening::UB ), + Tightening( 9, 1, Tightening::LB ), + Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), + Tightening( 10, -1, Tightening::UB ) - Tightening( 6, 6, Tightening::LB ), - Tightening( 6, 16, Tightening::UB ), - } ); + } ); - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } } - void test_sbt_relus_active_and_inactive() + void test_sbt_softmax3() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); - populateNetworkSBTRelu( nlr, tableau ); + populateNetworkSBTSoftmax2( nlr, tableau ); - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.00001 ); tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -30 ); + tableau.setUpperBound( 1, 1.00001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.00001 ); // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -5802,45 +9601,40 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite /* Input ranges: - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 30 : [-19, -3] - x2.ub = 2x0 + 3x1 - 30 : [-19, -3] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First ReLU is inactive, bounds get zeroed - Second ReLU is active, bounds surive the activation - - x4.lb = 0 - x4.ub = 0 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - x0 - x1 : [-11, -5] - x6.ub = - x0 - x1 : [-11, -5] + x0: [1, 1.00001] + x1: [1, 1.00001] + x2: [1, 1.00001] */ - List expectedBounds( { - Tightening( 2, -19, Tightening::LB ), - Tightening( 2, -3, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), + List expectedBounds({ Tightening( 3, 2, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 0, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), + Tightening( 6, -1, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), + Tightening( 7, -2, Tightening::UB ), + Tightening( 8, 0.8668, Tightening::LB ), + Tightening( 8, 0.8668, Tightening::UB ), + Tightening( 9, 0.9820, Tightening::LB ), + Tightening( 9, 0.9820, 
Tightening::UB ), + Tightening( 10, 0.1173, Tightening::LB ), + Tightening( 10, 0.1173, Tightening::UB ), + Tightening( 11, 0.0179, Tightening::LB ), + Tightening( 11, 0.0179, Tightening::UB ), + Tightening( 12, 0.0159, Tightening::LB ), + Tightening( 12, 0.0159, Tightening::UB ), + Tightening( 13, 0.9470, Tightening::LB ), + Tightening( 13, 0.9470, Tightening::UB ), + Tightening( 14, -0.9470, Tightening::LB ), + Tightening( 14, -0.9470, Tightening::UB ), + Tightening( 15, 1.0253, Tightening::LB ), + Tightening( 15, 1.0253, Tightening::UB ), + Tightening( 16, -1.0253, Tightening::LB ), + Tightening( 16, -1.0253, Tightening::UB ) - Tightening( 6, -11, Tightening::LB ), - Tightening( 6, -5, Tightening::UB ), } ); List bounds; @@ -5848,99 +9642,64 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - void test_sbt_relus_active_and_not_fixed() + void test_softmax_bounds_er() { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTRelu( nlr, tableau ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -15 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First ReLU is undecided, bound is concretized. - Coefficient: 12/(12--4) = 12/16 = 0.75 - Second ReLU is active, bounds surive the activation - - x4 range: [0, 12] - x4.lb = 0.75( 2x0 + 3x1 ) - 0.75 * 15 = 1.5x0 + 2.25x1 - 11.25 - x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = 0.5x0 + 1.25x1 - 11.25 - x6.ub = 0.5x0 + 1.25x1 - 8.25 - - x6 range: [2 + 1.25 - 11.25 = -8, 3 + 6.25 - 8.25 = 1] = [-8, 1] - */ + Vector inputLb = { -1, 0, 1 }; + Vector inputUb = { 0, 2, 4 }; + Vector input = { -0.5, 1, 2.5 }; - List expectedBounds( { - Tightening( 2, -4, Tightening::LB ), - Tightening( 2, 12, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), + double value = NLR::DeepPolySoftmaxElement::ERLowerBound( input, inputLb, inputUb, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 0.0114799, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dERLowerBound( input, inputLb, inputUb, 0, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 0.00563867, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dERLowerBound( input, inputLb, inputUb, 0, 1 ); + TS_ASSERT( FloatUtils::areEqual( value, -0.000838421, 0.00001 ) ); - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 12, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - Tightening( 6, -8, Tightening::LB ), - Tightening( 6, 1, Tightening::UB ), - } ); + Vector outputLb = { 0.2, 0, 0 }; + Vector outputUb = { 0.4, 0.1, 0.1 }; - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + value = NLR::DeepPolySoftmaxElement::ERUpperBound( input, outputLb, outputUb, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 
-1.44538, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dERUpperBound( input, outputLb, outputUb, 0, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 1.96538, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dERUpperBound( input, outputLb, outputUb, 0, 1 ); + TS_ASSERT( FloatUtils::areEqual( value, -0.358535, 0.00001 ) ); } - void test_sbt_relus_active_and_externally_fixed() + void test_softmax_bounds_lse1() { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Vector inputLb = { -1, 0, 1 }; + Vector inputUb = { 0, 2, 3 }; + Vector input = { -0.5, 1, 2 }; + double value = NLR::DeepPolySoftmaxElement::LSELowerBound( input, inputLb, inputUb, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) ); + value = NLR::DeepPolySoftmaxElement::dLSELowerBound( input, inputLb, inputUb, 0, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) ); + value = NLR::DeepPolySoftmaxElement::dLSELowerBound( input, inputLb, inputUb, 0, 1 ); + TS_ASSERT( FloatUtils::areEqual( value, -0.00703444, 0.001 ) ); + Vector outputLb = { 0.2, 0, 0 }; + Vector outputUb = { 0.4, 0.1, 0.1 }; + value = NLR::DeepPolySoftmaxElement::LSEUpperBound( input, outputLb, outputUb, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, -0.164165, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dLSEUpperbound( input, outputLb, outputUb, 0, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 0.272204, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dLSEUpperbound( input, outputLb, outputUb, 0, 1 ); + TS_ASSERT( FloatUtils::areEqual( value, -0.073207, 0.00001 ) ); + } + + void test_sbt_bilinear() + { NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); - populateNetworkSBTRelu( nlr, tableau ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
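A note on the bilinear relaxation exercised by test_sbt_bilinear below: the alpha/beta/gamma coefficients worked out in its comment are the two McCormick-style bounding planes for a product z = a * b. A minimal standalone sketch of that coefficient computation, with hypothetical helper names (an illustration, not the Marabou API):

    struct Interval
    {
        double lb, ub;
    };

    // A bounding plane alpha * a + beta * b + gamma for z = a * b.
    struct Plane
    {
        double alpha, beta, gamma;
    };

    // Lower plane: z >= b.lb * a + a.lb * b - a.lb * b.lb
    Plane bilinearLowerPlane( const Interval &a, const Interval &b )
    {
        return { b.lb, a.lb, -a.lb * b.lb };
    }

    // Upper plane: z <= b.ub * a + a.lb * b - a.lb * b.ub
    Plane bilinearUpperPlane( const Interval &a, const Interval &b )
    {
        return { b.ub, a.lb, -a.lb * b.ub };
    }

With a = x2 in [-1, 6] and b = x3 in [-1, 3], as in the test, this yields the lower plane ( -1, -1, -1 ) and the upper plane ( 3, -1, 3 ), matching the alpha_l/beta/gamma_l and alpha_u/beta/gamma_u values derived in the test's comment.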
- nlr.setBias( 1, 0, -15 ); + populateNetworkSBTBilinear( nlr, tableau ); - // However, one of the ReLU's variables has been eliminated - nlr.eliminateVariable( 2, -3 ); + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -2 ); + tableau.setUpperBound( 1, 1 ); // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -5949,350 +9708,238 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite /* Input ranges: - x0: [4, 6] - x1: [1, 5] - + x0: [1, 2] + x1: [-2, 1] + Layer 1: - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] + x2.lb = x0 - 2x1 : [-1, 6] + x2.ub = x0 - 2x1 : [-1, 6] - First ReLU is inactive (set externally), bounds get zeroed - Second ReLU is active, bounds surive the activation + x3.lb = x0 + x1 : [-1, 3] + x3.ub = x0 + x1 : [-1, 3] - x4.lb = 0 - x4.ub = 0 + Coefficients for bilinear layer: + Lower bound: + alpha_l = x3.lb = -1 + beta = x2.lb = -1 + gamma_l = -x2.lb * x3.lb = -(-1) * -1 = -1 + + Upper bound: + alpha_u = x3.ub = 3 + beta = x2.lb = -1 + gamma_u = -x2.lb * x3.ub = -(-1) * 3 = 3 - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] + x4.lb = -1 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + -1 = -2x0 + x1 - 1 : [-7, -2] + x4.ub = 3 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + 3 = 2x0 - 7x1 + 3 : [-2, 21] + x4 range: [-7, 21] - Layer 2: + Layer 3: - x6.lb = - x0 - x1 : [-11, -5] - x6.ub = - x0 - x1 : [-11, -5] + x5.lb = -1 ( 2x0 - 7x1 + 3 ) = -2x0 + 7x1 - 3 : [-21, 2] + x5.ub = -1 ( -2x0 + x1 - 1 ) = 2x0 - x1 + 1 : [2, 7] + x5 range: [-21, 7] */ - List expectedBounds( { - // x2 does not appear, because it has been eliminated - - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 0, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -11, Tightening::LB ), - Tightening( 6, -5, Tightening::UB ), - } ); + List expectedBounds( { Tightening( 2, -1, Tightening::LB ), + Tightening( 2, 6, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 3, Tightening::UB ), + Tightening( 4, -7, Tightening::LB ), + Tightening( 4, 21, Tightening::UB ), + Tightening( 5, -21, Tightening::LB ), + Tightening( 5, 7, Tightening::UB ) } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - void test_sbt_relu_residual1() + void test_concretize_input_assignment() { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); - populateNetworkSBTReluResidual1( nlr, tableau ); - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); + populateNetwork( nlr ); - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + // With ReLUs, inputs are zeros, only biases count + tableau.nextValues[0] = 0; + tableau.nextValues[1] = 0; - /* - Input ranges: + Map assignment; - x0: [-1, 1] + TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); - Layer 1: + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 4 ) ); - x1.lb = x0 : [-1, 1] - x1.ub = x0 : [-1, 1] + TS_ASSERT( assignment.size() == 14 ); + TS_ASSERT( FloatUtils::areEqual( assignment[12], 
1 ) ); + TS_ASSERT( FloatUtils::areEqual( assignment[13], 4 ) ); - ReLU is undecided, bound is concretized. - Coefficient: 1/( 1--1 ) = 1/2 = 0.5 - - x2.lb = 0.5x0 - x2.ub = 0.5x0 + 0.5 - x2 range: [0, 1] - - Layer 2 (with residual from x0): + // With ReLUs, case 1 + tableau.nextValues[0] = 1; + tableau.nextValues[1] = 1; - x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1] - x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5] - x3 range: [-1, 2.5] - - ReLU is undecided, bound is concretized. - Coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7. - - x4.lb = 0 - x4.ub = 5/7 ( -1.5x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5] - x4 range: [0, 2.5] + TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); - Layer 3 (with residual from x1): + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 1 ) ); - x5.lb = 3 ( 0 ) + 3 ( x0 ) + 1 = 3x0 + 1 : [-2, 4] - x5.ub = 3 ( -15/14 x0 + 20/14 ) + 3 ( x0 ) + 1 = -3/14 x0 + 74/14 : [71/14, 77/14 = 5.5] + TS_ASSERT( FloatUtils::areEqual( assignment[12], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( assignment[13], 1 ) ); - x5 range: [-2, 4] - */ + // With ReLUs, case 2 + tableau.nextValues[0] = 1; + tableau.nextValues[1] = 2; - List expectedBounds( { - Tightening( 1, -1, Tightening::LB ), - Tightening( 1, 1, Tightening::UB ), - Tightening( 2, 0, Tightening::LB ), - Tightening( 2, 1, Tightening::UB ), - Tightening( 3, -1, Tightening::LB ), - Tightening( 3, 2.5, Tightening::UB ), - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 2.5, Tightening::UB ), - Tightening( 5, 2, Tightening::LB ), - Tightening( 5, 5.5, Tightening::UB ), - } ); + TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 0 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 0 ) ); + + TS_ASSERT( FloatUtils::areEqual( assignment[12], 0 ) ); + TS_ASSERT( FloatUtils::areEqual( assignment[13], 0 ) ); } - - void test_sbt_relu_residual2() + + + void test_obtain_bound_from_ipq() { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTReluResidual2( nlr, tableau ); + populateNetwork( nlr ); - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); + Query query; + query.setNumberOfVariables( 14 ); - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - /* - Input ranges: + // Initialize the bounds + query.setLowerBound( 0, -1 ); + query.setUpperBound( 0, 1 ); + query.setLowerBound( 1, -1 ); + query.setUpperBound( 1, 1 ); - x0: [-1, 1] + double large = 1000; + query.setLowerBound( 2, -large ); + query.setUpperBound( 2, large ); + query.setLowerBound( 3, -large ); + query.setUpperBound( 3, large ); + query.setLowerBound( 4, -large ); + query.setUpperBound( 4, large ); + query.setLowerBound( 5, -large ); + query.setUpperBound( 5, large ); + query.setLowerBound( 6, -large ); + query.setUpperBound( 6, large ); + query.setLowerBound( 7, -large ); + query.setUpperBound( 7, large ); + query.setLowerBound( 8, -large ); + query.setUpperBound( 8, large ); + query.setLowerBound( 9, -large ); + 
query.setUpperBound( 9, large ); + query.setLowerBound( 10, -large ); + query.setUpperBound( 10, large ); + query.setLowerBound( 11, -large ); + query.setUpperBound( 11, large ); + query.setLowerBound( 12, -large ); + query.setUpperBound( 12, large ); + query.setLowerBound( 13, -large ); + query.setUpperBound( 13, large ); - Layer 1: + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds( query ) ); - x1.lb = x0 : [-1, 1] - x1.ub = x0 : [-1, 1] + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - ReLU is undecided, bound is concretized. - Coefficient: 1/( 1--1 ) = 1/2 = 0.5 - - x2.lb = 0.5x0 - x2.ub = 0.5x0 + 0.5 - x2 range: [0, 1] - - Layer 2 (with residual from x0): + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1] - x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5] - x3 range: [-1, 2.5] - - ReLU is undecided, bound is concretized. - Coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7. - - x4.lb = 0 - x4.ub = 5/7 ( -1.5x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5] - x4 range: [0, 2.5] + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Layer 3 (with residual from x0): + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - x5.lb = 3 ( 0 ) + 1 ( x0 ) + 1 = 1x0 + 1 : [0, 2] - x5.ub = 3 ( -15/14 x0 + 20/14 ) + 1 ( x0 ) + 1 = -31/14 x0 + 74/14 : [43/14, 105/14 = 7.5] - x5 range: [0, 7.5] - - Layer 4: - x6.lb = 1x0 + 1 : [0, 2] - x6.ub = -31/14 x0 + 74/14 : [43/14, 105/14 = 7.5] - x6 range: [0, 7.5] - */ + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - List expectedBounds( { - Tightening( 1, -1, Tightening::LB ), - Tightening( 1, 1, Tightening::UB ), - Tightening( 2, 0, Tightening::LB ), - Tightening( 2, 1, Tightening::UB ), - Tightening( 3, -1, Tightening::LB ), - Tightening( 3, 2.5, Tightening::UB ), - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 2.5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), - Tightening( 5, 7.5, Tightening::UB ), - Tightening( 6, 0, Tightening::LB ), - Tightening( 6, 7.5, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : expectedBounds ) + TS_ASSERT( bounds.exists( bound ) ); } - void test_sbt_relu_reindex() + void test_backwards_relu() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); - populateNetworkSBTReluReindex( nlr, tableau ); + populateNetworkBackwardReLU( nlr, tableau ); - tableau.setLowerBound( 0, 
-1 ); + tableau.setLowerBound( 0, 0 ); tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); + tableau.setLowerBound( 1, 0 ); tableau.setUpperBound( 1, 1 ); - // Invoke SBT + // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [-1, 1] - x1: [-1, 1] - - Layer 1: - - x2.lb = x0 + x1 : [-2, 2] - x2.ub = x0 + x1 : [-2, 2] - - x3.lb = x0 - x1 : [-2, 2] - x3.ub = x0 - x1 : [-2, 2] - - Both ReLUs are undecided, bounds are concretized. - Coefficient: 2/( 2--2 ) = 2/4 = 0.5 - - x4.lb = 0.5 ( x0 + x1 ) = 0.5x0 + 0.5x1 - x4.ub = 0.5 ( x0 + x1 ) + 1 = 0.5x0 + 0.5x1 + 1 - x4 range: [0, 2] - - x5.lb = 0.5 ( x0 - x1 ) = 0.5x0 - 0.5x1 - x5.ub = 0.5 ( x0 - x1 ) + 1 = 0.5x0 - 0.5x1 + 1 - x5 range: [0, 2] - - Layer 2: - - x6.lb = 1 ( 0.5x0 + 0.5x1 ) + 1 ( 0.5x0 - 0.5x1 ) = x0 : [-1, 1] - x6.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) + 1 ( 0.5x0 - 0.5x1 + 1 ) = x0 + 2 : [1, 3] - x6 range: [-1, 3] - - x7.lb = 1 ( 0.5x0 + 0.5x1 ) - 1 ( 0.5x0 - 0.5x1 + 1 ) = x1 - 1 : [-2, 0] - x7.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) - 1 ( 0.5x0 - 0.5x1 ) = x1 + 1 : [0, 2] - x7 range: [-2, 2] - - Both ReLUs are undecided, bounds are concretized. - Coefficient (first ReLU, lower): 1/( 1--1 ) = 1/2 = 0.5 - Coefficient (first ReLU, upper): 1 (propagated as is) - Coefficient (second ReLU, lower): 0 (bound is zero) - Coefficient (second ReLU, upper): 2/( 2--2 ) = 2/4 = 0.5 - - x8.lb = 0.5 ( x0 ) = 0.5x0 - x8.ub = x0 + 2 - x8 range: [0, 3] - - x9.lb = 0 - x9.ub = 0.5 ( x1 + 1 ) + 1 = 0.5x1 + 1.5 - x9 range: [0, 2] - - Layer 3: - - x10.lb = 1 ( 0.5x0 ) + 1 ( 0 ) + 1 = 0.5x0 + 1 : [0.5, 1.5] - x10.ub = 1 ( x0 + 2 ) + 1 ( 0.5x1 + 1.5 ) + 1 = x0 + 0.5x1 + 4.5 : [3, 6] - x10 range: [0.5, 6] - - x11.lb = 0.5x1 - 0.5 - x11.ub = 0.5x1 + 1.5 - x11 range: [0, 2] - - */ - + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + List expectedBounds( - { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 10, 0.5, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 2, Tightening::UB ) + Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ), + Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ), } ); - + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_abs_all_positive() - { 
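The "Coefficient: u/( u - l )" computations that recur in the SBT comments above follow the standard symbolic relaxation of an undecided ReLU. A minimal sketch, assuming source bounds l < 0 < u (hypothetical helper names, not the Marabou API):

    // For y = relu( x ) with x in [l, u] and l < 0 < u:
    //   y >= lambda * x               (valid since 0 <= lambda <= 1)
    //   y <= lambda * x - lambda * l  (the chord through ( l, 0 ) and ( u, u ))
    void undecidedReluRelaxation( double l, double u, double &lambda, double &upperBias )
    {
        lambda = u / ( u - l );   // e.g. u = 2, l = -2 gives 2/4 = 0.5
        upperBias = -lambda * l;  // e.g. -0.5 * -2 = 1, so y <= 0.5x + 1
    }

These are exactly the coefficient and offset used in the walkthroughs, e.g. x4.ub = 0.5 ( x0 + x1 ) + 1 for source bounds [-2, 2], or the 12/16 = 0.75 coefficient with offset 3 for source bounds [-4, 12].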
- Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { Tightening( 8, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); - // Very loose bounds for neurons except inputs double large = 1000000; - tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); @@ -6303,112 +9950,138 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - // Invoke SBT + // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 : [11, 27] - x2.ub = 2x0 + 3x1 : [11, 27] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - Both absolute values positive, bound survive through activations: - - x4.lb = 2x0 + 3x1 : [11, 27] - x4.ub = 2x0 + 3x1 : [11, 27] - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = x0 + 2x1 : [6, 16] - x6.ub = x0 + 2x1 : [6, 16] - */ + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + 
Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - List expectedBounds( { - Tightening( 2, 11, Tightening::LB ), - Tightening( 2, 27, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 4, 11, Tightening::LB ), - Tightening( 4, 27, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ), - Tightening( 6, 6, Tightening::LB ), - Tightening( 6, 16, Tightening::UB ), - } ); + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ), - List bounds; + Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { Tightening( 8, 0, Tightening::LB ), + Tightening( 9, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - - void test_sbt_abs_positive_and_negative() + + void test_backwards_relu2() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); nlr.setTableau( &tableau ); + populateNetworkBackwardReLU2( nlr, tableau ); - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - 
nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + List expectedBounds( + { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ), + + Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ), + Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ), + + Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ), + + Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ), + + Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + + Tightening( 19, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Very loose bounds for neurons except inputs - double large = 1000000; + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); + double large = 1000000; tableau.setLowerBound( 3, -large ); tableau.setUpperBound( 3, large ); tableau.setLowerBound( 4, -large ); @@ -6417,114 +10090,155 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -30 ); - - // Invoke SBT + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + 
tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 30 : [-19, -3] - x2.ub = 2x0 + 3x1 - 30 : [-19, -3] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First absolute value is negative, bounds get flipped - Second absolute value is positive, bounds surive the activation - - x4.lb = -2x0 -3x1 + 30 : [3, 19] - x4.ub = -2x0 -3x1 + 30 : [3, 19] - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - 3x0 - 4x1 + 30 : [-8, 14] - x6.ub = - 3x0 - 4x1 + 30 : [-8, 14] - */ - - List expectedBounds( { - Tightening( 2, -19, Tightening::LB ), - Tightening( 2, -3, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 3, Tightening::LB ), - Tightening( 4, 19, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -8, Tightening::LB ), - Tightening( 6, 14, Tightening::UB ), - } ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ), + Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ), + Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ), + + Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ), + Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ), + Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ), + + Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ), + } ); + + 
TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { Tightening( 7, 0, Tightening::LB ), - List bounds; + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + + Tightening( 20, 0, Tightening::LB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - void test_sbt_absolute_values_positive_and_not_fixed() + void test_backwards_sigmoid() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); nlr.setTableau( &tableau ); + populateNetworkBackwardSigmoid( nlr, tableau ); - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( + { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + Tightening( 4, 0.2689, Tightening::LB ), Tightening( 4, 0.7311, Tightening::UB ), + Tightening( 5, 0.5, Tightening::LB ), Tightening( 5, 0.8808, Tightening::UB ), - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + Tightening( 6, -0.1261, Tightening::LB ), Tightening( 6, 0.5069, Tightening::UB ), + Tightening( 7, -0.2379, Tightening::LB ), Tightening( 7, 0.8571, Tightening::UB ), - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + Tightening( 8, 0.4685, Tightening::LB ), Tightening( 8, 0.6241, Tightening::UB ), + Tightening( 9, 0.4408, Tightening::LB ), Tightening( 9, 0.7021, Tightening::UB ), - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + Tightening( 10, -1.1819, Tightening::LB ), Tightening( 10, -1.0535, Tightening::UB ), + Tightening( 11, 3.4986, Tightening::LB ), Tightening( 11, 3.8797, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + 
// Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); - // Very loose bounds for neurons except inputs double large = 1000000; - tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); @@ -6535,117 +10249,284 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -15 ); - // Invoke SBT + // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First absolute value is undecided, bounds are concretized. 
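The concrete sigmoid bounds asserted in these DeepPoly tests are the sigmoid evaluated at the source bounds, since the sigmoid is monotonically increasing. A quick standalone sanity check (plain C++, not part of the suite):

    #include <cmath>

    double sigmoid( double x )
    {
        return 1.0 / ( 1.0 + std::exp( -x ) );
    }

    // sigmoid( -1 ) = 0.2689..., sigmoid( 1 ) = 0.7311...
    // sigmoid(  0 ) = 0.5,       sigmoid( 2 ) = 0.8808...

These match, e.g., the [0.2689, 0.7311] and [0.5, 0.8808] ranges checked for x4 and x5 in test_backwards_sigmoid.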
- Second absolute value is active, bounds surive the activation - - x4 range: [0, 12] - x4.lb = 0 - x4.ub = 12 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Layer 2: + Tightening( 4, 0.0066, Tightening::LB ), Tightening( 4, 0.8807, Tightening::UB ), + Tightening( 5, 0.0179, Tightening::LB ), Tightening( 5, 0.9526, Tightening::UB ), - x6.lb = - x0 - x1 : [-11, -5] - x6.ub = - x0 - x1 + 12 : [ 1, 7] + Tightening( 6, -0.8362, Tightening::LB ), Tightening( 6, 0.9193, Tightening::UB ), + Tightening( 7, -0.8860, Tightening::LB ), Tightening( 7, 1.6904, Tightening::UB ), - x6 range: [-11, 7] - */ + Tightening( 8, 0.3023, Tightening::LB ), Tightening( 8, 0.7148, Tightening::UB ), + Tightening( 9, 0.2919, Tightening::LB ), Tightening( 9, 0.8443, Tightening::UB ), - List expectedBounds( { - Tightening( 2, -4, Tightening::LB ), - Tightening( 2, 12, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), + Tightening( 10, -1.2694, Tightening::LB ), Tightening( 10, -0.8841, Tightening::UB ), + Tightening( 11, 3.2396, Tightening::LB ), Tightening( 11, 4.05, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_sigmoid2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSigmoid2( nlr, tableau ); - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 12, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); - Tightening( 6, -11, Tightening::LB ), - Tightening( 6, 7, Tightening::UB ), - } ); + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( + { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), + Tightening( 8, 0.0474, Tightening::LB ), Tightening( 8, 0.9526, Tightening::UB ), + Tightening( 9, 0.0474, Tightening::LB ), Tightening( 9, 0.9526, Tightening::UB ), + Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9975, Tightening::UB ), + + Tightening( 11, 1.3787, Tightening::LB ), Tightening( 11, 4.6213, 
Tightening::UB ), + Tightening( 12, -0.5636, Tightening::LB ), Tightening( 12, 2.5636, Tightening::UB ), + Tightening( 13, -2.3771, Tightening::LB ), Tightening( 13, 0.3771, Tightening::UB ), + + Tightening( 14, 0.7988, Tightening::LB ), Tightening( 14, 0.9903, Tightening::UB ), + Tightening( 15, 0.3627, Tightening::LB ), Tightening( 15, 0.9285, Tightening::UB ), + Tightening( 16, 0.0849, Tightening::LB ), Tightening( 16, 0.5932, Tightening::UB ), + + Tightening( 17, -1.2113, Tightening::LB ), Tightening( 17, 0.0354, Tightening::UB ), + Tightening( 18, 1.4027, Tightening::LB ), Tightening( 18, 2.4177, Tightening::UB ), + + Tightening( 19, 0.2295, Tightening::LB ), Tightening( 19, 0.5088, Tightening::UB ), + Tightening( 20, 0.8026, Tightening::LB ), Tightening( 20, 0.9182, Tightening::UB ), + + Tightening( 21, -1.6539, Tightening::LB ), Tightening( 21, -1.3393, Tightening::UB ), + } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); - void test_sbt_absolute_values_active_and_externally_fixed() + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), 
Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.9933, Tightening::UB ), + Tightening( 8, 0.0067, Tightening::LB ), Tightening( 8, 0.9933, Tightening::UB ), + Tightening( 9, 0.0025, Tightening::LB ), Tightening( 9, 0.9933, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9991, Tightening::UB ), + + Tightening( 11, 1.2517, Tightening::LB ), Tightening( 11, 4.9701, Tightening::UB ), + Tightening( 12, -0.9599, Tightening::LB ), Tightening( 12, 2.8466, Tightening::UB ), + Tightening( 13, -2.8147, Tightening::LB ), Tightening( 13, 0.9188, Tightening::UB ), + + Tightening( 14, 0.7776, Tightening::LB ), Tightening( 14, 0.9931, Tightening::UB ), + Tightening( 15, 0.2769, Tightening::LB ), Tightening( 15, 0.9451, Tightening::UB ), + Tightening( 16, 0.0565, Tightening::LB ), Tightening( 16, 0.7148, Tightening::UB ), + + Tightening( 17, -1.4307, Tightening::LB ), Tightening( 17, 0.1083, Tightening::UB ), + Tightening( 18, 1.2592, Tightening::LB ), Tightening( 18, 2.5519, Tightening::UB ), + + Tightening( 19, 0.1929, Tightening::LB ), Tightening( 19, 0.5270, Tightening::UB ), + Tightening( 20, 0.7789, Tightening::LB ), Tightening( 20, 0.9277, Tightening::UB ), + + Tightening( 21, -1.6967, Tightening::LB ), Tightening( 21, -1.3115, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_abs() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); nlr.setTableau( &tableau ); + populateNetworkBackwardAbs( nlr, tableau ); - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( + { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + Tightening( 
4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + Tightening( 10, -4, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 2, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); - // Very loose bounds for neurons except inputs double large = 1000000; - tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); @@ -6656,116 +10537,133 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
- nlr.setBias( 1, 0, -15 ); - - // However, the weighted sum variable has been eliminated - nlr.eliminateVariable( 2, -3 ); - // Invoke SBT + // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2 is eliminated, everything set to -3 - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - Second absolute value is positive, bounds surive the activation - - x4: all set to 3 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - x0 - x1 + 3 : [-8, -2] - x6.ub = - x0 - x1 + 3 : [-8, -2] - */ - - List expectedBounds( { - // x2 does not appear, because it has been eliminated + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 4, Tightening::UB ), - Tightening( 4, 3, Tightening::LB ), - Tightening( 4, 3, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), + Tightening( 6, -5, Tightening::LB ), Tightening( 6, 4, Tightening::UB ), + Tightening( 7, -4, Tightening::LB ), Tightening( 7, 10, Tightening::UB ), - Tightening( 6, -8, Tightening::LB ), - Tightening( 6, -2, Tightening::UB ), - } ); + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 10, Tightening::UB ), - List bounds; + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 2, Tightening::LB ), Tightening( 11, 27, Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - void test_sbt_signs_positive_and_not_fixed() + void test_backwards_abs2() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); nlr.setTableau( &tableau ); + populateNetworkBackwardAbs2( nlr, tableau ); - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - 
nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( + { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 9, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), Tightening( 14, 9, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), + Tightening( 16, 0, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), + + Tightening( 17, -15, Tightening::LB ), Tightening( 17, 12, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 27, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 15, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), + + Tightening( 21, -28, Tightening::LB ), Tightening( 21, 14, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); - // Very loose bounds for neurons except inputs double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); tableau.setUpperBound( 3, large ); tableau.setLowerBound( 4, -large ); @@ -6774,120 +10672,149 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); - - tableau.setLowerBound( 0, 4 ); - 
tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -15 ); - - // Invoke SBT + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First sign is undecided, bounds are concretized. - Second sign is active, bounds become constant 1 - Coefficient (first Sign, lower): 2/12 = 1/6. - Coefficient (first Sign, upper): -2/-4 = 1/2. 
- - x4 range: [-1, 1] - x4.lb = 1/6 ( 2x0 + 3x1 - 15 ) - 1 = 2/6 x0 + 3/6 x1 - 21/6 - x4.ub = 1/2 ( 2x0 + 3x1 - 15 ) + 1 = x0 + 1.5x1 - 6.5 - - x5 range: [1, 1] - x5.lb = 1 - x5.ub = 1 - - Layer 2: - - x6.lb = 1 ( 2/6 x0 + 3/6 x1 - 21/6 ) - 1 ( 1 ) = 2/6 x0 + 3/6 x1 - 27/6 : [-16/6, 0] - x6.ub = 1 ( x0 + 1.5x1 - 6.5 ) - 1 ( 1 ) = x0 + 1.5x1 - 7.5 : [-2, 6] - - x6 range: [-8/3, 6] - */ - - List expectedBounds( { - Tightening( 2, -4, Tightening::LB ), - Tightening( 2, 12, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), - Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 1, Tightening::LB ), - Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, -2.6667, Tightening::LB ), - Tightening( 6, 6, Tightening::UB ), - } ); - - List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 15, Tightening::UB ), + + Tightening( 11, -13, Tightening::LB ), Tightening( 11, 18, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 25, Tightening::UB ), + Tightening( 13, -7, Tightening::LB ), Tightening( 13, 15, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), Tightening( 14, 18, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), Tightening( 15, 25, Tightening::UB ), + Tightening( 16, 0, Tightening::LB ), Tightening( 16, 15, Tightening::UB ), + + Tightening( 17, -33, Tightening::LB ), Tightening( 17, 25, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 58, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 58, Tightening::UB ), + + Tightening( 21, -59, Tightening::LB ), Tightening( 21, 32, Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - - void test_sbt_signs_active_and_externally_fixed() + + void test_backwards_round() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); nlr.setTableau( &tableau ); + populateNetworkBackwardRound( nlr, tableau ); - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); 
- nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - // Mark the Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( + { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -4, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 6.5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { Tightening( 11, -4, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); - // Very loose bounds for neurons except inputs double large = 1000000; - tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); @@ -6898,414 +10825,650 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - 
tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. - nlr.setBias( 1, 0, -15 ); - - // However, the weighted sum variable has been eliminated - nlr.eliminateVariable( 2, -3 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2 is eliminated, everything set to -3 - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First sign is negative, bounds become constant -1 - Second sign is positive, bounds become constant 1 - - x4: all set to -1 - - x5: all set to 1 - - Layer 2: - - x6.lb = 1 ( -1 ) - 1 ( 1 ) = -2 - x6.ub = 1 ( -1 ) - 1 ( 1 ) = -2 - */ - - List expectedBounds( { - // x2 does not appear, because it has been eliminated - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 4, -1, Tightening::LB ), - Tightening( 4, -1, Tightening::UB ), - Tightening( 5, 1, Tightening::LB ), - Tightening( 5, 1, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -2, Tightening::LB ), - Tightening( 6, -2, Tightening::UB ), - } ); + Tightening( 6, -3, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, -10.5, Tightening::LB ), Tightening( 7, 5.5, Tightening::UB ), - List bounds; + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -10, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + + Tightening( 10, -3, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + Tightening( 11, -15.5, Tightening::LB ), Tightening( 11, 11.5, Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { Tightening( 7, -10, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + + Tightening( 11, -14, Tightening::LB ), Tightening( 11, 11, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - void test_sbt_leaky_relu() + void test_backwards_round2() { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); - populateNetworkSBTLeakyReLU( nlr, tableau ); // alpha = 0.2 + populateNetworkBackwardRound2( nlr, tableau ); tableau.setLowerBound( 0, -1 ); tableau.setUpperBound( 0, 1 ); tableau.setLowerBound( 1, -1 ); tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( 
nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [-1, 1] - x1: [-1, 1] - - Layer 1: - - x2.lb = x0 + x1 : [-2, 2] - x2.ub = x0 + x1 : [-2, 2] - - x3.lb = x0 - x1 : [-2, 2] - x3.ub = x0 - x1 : [-2, 2] - - Both LeakyReLUs are undecided, bounds are concretized. - Coefficient: ( 2 - 0.2*-2 )/( 2--2 ) = 2.4/4 = 0.6 - Bias: ( 0.2 - 1 ) * 2 * -2 / ( 2--2 ) = 0.8 - - x4.lb = x0 + x1 - x4.ub = 0.6 ( x0 + x1 ) + 0.8 = 0.6x0 + 0.6x1 + 0.8 - x4 range: [-0.4, 2] - - x5.lb = x0 - x1 - x5.ub = 0.6 ( x0 - x1 ) + 0.8 = 0.6x0 - 0.6x1 + 0.8 - x5 range: [-0.4, 2] - - Layer 2: - - x6.lb = 1 ( x0 + x1 ) + 1 ( x0 - x1 ) = 2x0 : [-2, 2] - x6.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) + 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 1.2x0 + 1.6 : [0.4, 2.8] - x6 range: [-2, 2.8] - - x7.lb = 1 ( x0 + x1 ) - 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2] - x7.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) - 1 ( x0 - x1 ) = -0.4x0 + 1.6x1 + 0.8 : [-1.2, 2.8] - x7 range: [-2.8, 2.8] - - Both LeakyReLUs are undecided, bounds are concretized. - Coefficient (first LeakyReLU): ( 2.8 - 0.2*-2 )/( 2.8--2 ) = 3.2/4.8 = 10/15 - Bias (first LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2 / ( 2.8--2 ) = 14/15 - - Coefficient (second LeakyReLU): ( 2.8 - 0.2*-2.8 )/( 2.8--2.8 ) = 3.36/5.6 = 0.6 - Bias (second LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2.8 / ( 2.8--2.8 ) = 1.12 - - x8.lb = 2x0 - x8.ub = 10/15 ( 1.2x0 + 1.6 ) + 14/15 = 0.8x0 + 2 - x8 range: [-0.4, 2.8] - - x9.lb = 0.4x0 + 1.6x1 - 0.8 - x9.ub = 0.6 ( -0.4x0 + 1.6x1 + 0.8 ) + 1.12 = -0.24 x0 + 0.96 x1 + 1.6 - x9 range: [-0.56, 2.8] - - Layer 3: - - x10.lb = 1 ( 0.4x0 + 1.6x1 - 0.8 ) + 1 ( 2x0 ) + 1 = 2.4x0 + 1.6x1 + 0.2 : [-3.8, 5.2] - x10.ub = 1 ( -0.24 x0 + 0.96 x1 + 1.6 ) + 1 ( 0.8x0 + 2 ) + 1 = 0.56x0 + 0.96x1 + 4.6 : [3.08, 6.12] - x10 range: [-3.8, 6.12] - - x11.lb = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2] - x11.ub = 0.6 ( -0.4x0 + 1.6x1 + 0.8 ) + 1.12 = -0.24 x0 + 0.96 x1 + 1.6 : [0.4, 2.8] - x11 range: [-2.8, 2.8] - - */ + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + List expectedBounds( - { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -0.4, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, -0.4, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2.8, Tightening::UB ), - Tightening( 7, -2.8, Tightening::LB ), Tightening( 7, 2.8, Tightening::UB ), - - Tightening( 8, -0.4, Tightening::LB ), Tightening( 8, 2.8, Tightening::UB ), - Tightening( 9, -0.56, Tightening::LB ), Tightening( 9, 2.8, Tightening::UB ), - - Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 6.12, Tightening::UB ), - Tightening( 11, -2.8, Tightening::LB ), Tightening( 11, 2.8, Tightening::UB ) - + { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), 
+ + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 15, Tightening::UB ), + Tightening( 12, -10, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -7, Tightening::LB ), Tightening( 13, 3, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 15, Tightening::UB ), + Tightening( 15, -10, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -7, Tightening::LB ), Tightening( 16, 3, Tightening::UB ), + + Tightening( 17, -27.5, Tightening::LB ), Tightening( 17, 27.5, Tightening::UB ), + Tightening( 18, -16.5, Tightening::LB ), Tightening( 18, 16.5, Tightening::UB ), + + Tightening( 19, -28, Tightening::LB ), Tightening( 19, 28, Tightening::UB ), + Tightening( 20, -17, Tightening::LB ), Tightening( 20, 17, Tightening::UB ), + + Tightening( 21, -38, Tightening::LB ), Tightening( 21, 36, Tightening::UB ), } ); - + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_sigmoids_and_round() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTSigmoidsAndRound( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); - // Invoke SBT + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - 
TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -13, Tightening::LB ), Tightening( 11, 30, Tightening::UB ), + Tightening( 12, -23, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), + + Tightening( 14, -13, Tightening::LB ), Tightening( 14, 30, Tightening::UB ), + Tightening( 15, -23, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), + Tightening( 16, -9, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), + + Tightening( 17, -57.5, Tightening::LB ), Tightening( 17, 32.5, Tightening::UB ), + Tightening( 18, -23.5, Tightening::LB ), Tightening( 18, 26.5, Tightening::UB ), + + Tightening( 19, -58, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), + Tightening( 20, -24, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), + + Tightening( 21, -74, Tightening::LB ), Tightening( 21, 44, Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - // Layer 1 - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 0 ), -2, 0.00001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 0 ), 2, 0.00001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 1 ), -2, 0.00001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 1 ), 2, 0.00001 ) ); - - // Layer 2 - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 0 ), 0.1192, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 0 ), 0.8807, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 1 ), 0.1192, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 1 ), 0.8807, 0.0001 ) ); - - // Layer 3 - /* - Double-check with Python - --- - from math import exp as e - def g(x): - return 1 / (1 + e(-x)) - - def g_prime(x): - return g(x) * (1 - g(x)) - - def lam(l, u): - return (g(u) - g(l)) / (u - l) - - def lam_prime(l, u): - return min(g_prime(l), g_prime(u)) - - l3 = l4 = -2 - u3 = u4 = 2 - l5 = l6 = g(-2) - u5 = u6 = g(2) - lambda7 = lam(l3, u3) - lambda7_prime = lam_prime(l3, u3) - lambda8 = lam(l4, u4) - lambda8_prime = lam_prime(l4, u4) - x7_l = lambda7_prime * (-2) + g(-2) + g(-2) - lambda7_prime * (-2 + -2) - x7_u = lambda7_prime * (2) + g(2) + g(2) -lambda7_prime * (2 + 2) - x8_l = lambda8_prime * (-2) + g(-2) - g(2) - lambda8_prime * (-2 - 2) - x8_u = lambda8_prime * (2) + g(2) - g(-2) -lambda8_prime * (2 - -2) - print(x7_l) - print(x7_u) - print(x8_l) - print(x8_u) - --- - [output]: - 0.4483930148512481 - 1.5516069851487517 - -0.5516069851487517 - 0.5516069851487517 - */ - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 0 ), 0.4483, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 0 ), 1.5516, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( 
nlr.getLayer( 3 )->getLb( 1 ), -0.5516, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 1 ), 0.5516, 0.0001 ) ); - - // Layer 4 - TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 0 ), 0 ); - TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 0 ), 2 ); - TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 1 ), -1 ); - TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 1 ), 1 ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - void test_sbt_max_not_fixed() + void test_backwards_sign() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); - populateNetworkSBTMax( nlr, tableau ); + populateNetworkBackwardSign( nlr, tableau ); - tableau.setLowerBound( 0, -1 ); + tableau.setLowerBound( 0, 0 ); tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); - // Invoke SBT + + // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( + { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - /* - Input ranges: + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), - x0: [-1, 1] - x1: [-1, 2] + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Layer 1: + Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - x2.lb = x0 + x1 : [-2, 3] - x2.ub = x0 + x1 : [-2, 3] + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); - x3.lb = x0 - x1 : [-3, 2] - x3.ub = x0 - x1 : [-3, 2] + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + 
tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); - Both ReLUs are undecided, bounds are concretized. - Coefficient (first ReLU): 3/( 3--2 ) = 3/5 = 0.6 - Coefficient (second ReLU): 2/( 2--3 ) = 2/5 = 0.4 - x4.lb = 0.6 ( x0 + x1 ) = 0.6x0 + 0.6x1 - x4.ub = 0.6 ( x0 + x1 ) + 1.2 = 0.6x0 + 0.6x1 + 1.2 - x4 range: [0, 3] - - x5.lb = 0.4 ( x0 - x1 ) = 0.4x0 + 0.4x1 - x5.ub = 0.4 ( x0 - x1 ) + 1.2 = 0.4x0 + 0.4x1 + 1.2 - x5 range: [0, 2] - - Max is not fixed because x5.lb <= x4.ub and x4.lb <= x5.ub - Max inherits lower bound from x4, and its upper bound is constant 3. - - x6.lb = 0.6x0 + 0.6x1 : [-1.2, 1.8] - x6.ub = 3 : [3, 3] - x6 range: [-1.2, 3] + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Layer 3: + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), - x7.lb = 2 ( 0.6x0 + 0.6x1 ) = 1.2x0 + 1.8x1 : [-2.4, 3.6] - x7.ub = 2 ( 3 ) = 6 : [6, 6] - x7 range: [-2.4, 6] - */ + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), - List expectedBounds( { - Tightening( 2, -2, Tightening::LB ), - Tightening( 2, 3, Tightening::UB ), - Tightening( 3, -3, Tightening::LB ), - Tightening( 3, 2, Tightening::UB ), - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 3, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), - Tightening( 5, 2, Tightening::UB ), - Tightening( 6, 0, Tightening::LB ), - Tightening( 6, 3, Tightening::UB ), - Tightening( 7, -2.4, Tightening::LB ), - Tightening( 7, 6, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_sign2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + 
populateNetworkBackwardSign2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); - } ); + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( + { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - void test_sbt_max_fixed() + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + 
tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_leaky_relu() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); - populateNetworkSBTMax( nlr, tableau ); + populateNetworkBackwardLeakyReLU( nlr, tableau ); - tableau.setLowerBound( 0, 1 ); - tableau.setUpperBound( 0, 2 ); - tableau.setLowerBound( 1, -3 ); - tableau.setUpperBound( 1, -2 ); + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); - // Invoke SBT + + // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( 
nlr.symbolicBoundPropagation() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( + { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - /* - Input ranges: + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - x0: [1, 2] - x1: [-3, -2] + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Layer 1: + Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - x2.lb = x0 + x1 : [-2, 0] - x2.ub = x0 + x1 : [-2, 0] + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { Tightening( 4, -0.1, Tightening::LB ), + + Tightening( 8, -0.045, Tightening::LB ), + Tightening( 9, -0.3, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); - x3.lb = x0 - x1 : [3, 5] - x3.ub = x0 - x1 : [3, 5] + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); - First ReLU is negative, bounds become constant 0 - Second ReLU is positive, bounds survive the activation - x4: all set to 0 - - x5.lb = x0 - x1 : [3, 5] - x5.ub = x0 - x1 : [3, 5] - - Max is fixed because x5.lb > x4.ub, it inherits x5's bounds - - x6.lb = x0 - x1 : [3, 5] - x6.ub = x0 - x1 : [3, 5] + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Layer 3: + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - x7.lb = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10] - x7.ub = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10] - */ + Tightening( 6, 
-4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), + Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), - List expectedBounds( { - Tightening( 2, -2, Tightening::LB ), - Tightening( 2, 0, Tightening::UB ), - Tightening( 3, 3, Tightening::LB ), - Tightening( 3, 5, Tightening::UB ), - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 0, Tightening::UB ), - Tightening( 5, 3, Tightening::LB ), - Tightening( 5, 5, Tightening::UB ), - Tightening( 6, 3, Tightening::LB ), - Tightening( 6, 5, Tightening::UB ), - Tightening( 7, 6, Tightening::LB ), - Tightening( 7, 10, Tightening::UB ), + Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), + Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), - } ); + Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), + Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { Tightening( 4, -0.5, Tightening::LB ), + Tightening( 5, -0.4, Tightening::LB ), - List bounds; + Tightening( 8, -0.4571, Tightening::LB ), + Tightening( 9, -1.1057, Tightening::LB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - - void test_sbt_softmax1() + + void test_backwards_leaky_relu2() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); - populateNetworkSBTSoftmax( nlr, tableau ); + populateNetworkBackwardLeakyRelu2( nlr, tableau ); tableau.setLowerBound( 0, -1 ); tableau.setUpperBound( 0, 1 ); @@ -7314,412 +11477,704 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 2, -1 ); tableau.setUpperBound( 2, 1 ); - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - } - void test_sbt_softmax2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - { - Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTSoftmax( nlr, tableau ); - - tableau.setLowerBound( 0, 1 ); - tableau.setUpperBound( 0, 1.000001 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 1.000001 ); - tableau.setLowerBound( 2, 1 ); - tableau.setUpperBound( 2, 1.000001 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [1, 1.0001] - x1: [1, 1.0001] - x2: [1, 1.0001] - */ - List expectedBounds( { Tightening( 3, 2, Tightening::LB ), - Tightening( 3, 2, Tightening::UB ), - Tightening( 4, 3, Tightening::LB ), - 
Tightening( 4, 3, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), - Tightening( 5, 0, Tightening::UB ), - Tightening( 6, 0.2595, Tightening::LB ), - Tightening( 6, 0.2595, Tightening::UB ), - Tightening( 7, 0.7054, Tightening::LB ), - Tightening( 7, 0.7054, Tightening::UB ), - Tightening( 8, 0.0351, Tightening::LB ), - Tightening( 8, 0.0351, Tightening::UB ), - Tightening( 9, 1, Tightening::LB ), - Tightening( 9, 1, Tightening::UB ), - Tightening( 10, -1, Tightening::LB ), - Tightening( 10, -1, Tightening::UB ) - + List expectedBounds( + { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ), + Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ), + Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ), + Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ), + Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ), + + Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ), + Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ), + + Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ), + Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ), + + Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ), } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - { - Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "er" ); - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTSoftmax( nlr, tableau ); - - tableau.setLowerBound( 0, 1 ); - tableau.setUpperBound( 0, 1.000001 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 1.000001 ); - tableau.setLowerBound( 2, 1 ); - tableau.setUpperBound( 2, 1.000001 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [1, 1.0001] - x1: [1, 1.0001] - x2: [1, 1.0001] - */ - List expectedBounds( { Tightening( 3, 2, Tightening::LB ), - Tightening( 3, 2, Tightening::UB ), - Tightening( 4, 3, Tightening::LB ), - Tightening( 4, 3, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), - Tightening( 5, 0, Tightening::UB ), - Tightening( 6, 0.2595, Tightening::LB ), - Tightening( 6, 0.2595, Tightening::UB ), - Tightening( 7, 0.7054, Tightening::LB ), - Tightening( 7, 0.7054, Tightening::UB ), - Tightening( 8, 0.0351, Tightening::LB ), - Tightening( 8, 0.0351, Tightening::UB ), - Tightening( 9, 1, Tightening::LB ), - Tightening( 9, 1, Tightening::UB ), - Tightening( 10, -1, Tightening::LB ), - Tightening( 10, -1, Tightening::UB ) - + + List 
bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { Tightening( 7, -0.2, Tightening::LB ), + Tightening( 8, -0.3, Tightening::LB ), + Tightening( 9, -0.3, Tightening::LB ), + Tightening( 10, -0.6, Tightening::LB ), + + Tightening( 14, -0.9, Tightening::LB ), + Tightening( 15, -0.89, Tightening::LB ), + Tightening( 16, -0.77, Tightening::LB ), + + Tightening( 19, -2.3133, Tightening::LB ), + Tightening( 20, -1.2, Tightening::LB ), } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -11, 
Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ), + Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ), + Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ), + Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ), + Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ), + + Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ), + Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ), + + Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ), + Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ), + + Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { Tightening( 7, -0.2, Tightening::LB ), + Tightening( 8, -0.5, Tightening::LB ), + Tightening( 9, -0.6, Tightening::LB ), + Tightening( 10, -1.5, Tightening::LB ), + + Tightening( 14, -1.1, Tightening::LB ), + Tightening( 15, -2.1771, Tightening::LB ), + Tightening( 16, -1.15, Tightening::LB ), + + Tightening( 19, -5.6259, Tightening::LB ), + Tightening( 20, -1.9, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - - void test_sbt_softmax3() + + void test_backwards_softmax_and_max() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); - populateNetworkSBTSoftmax2( nlr, tableau ); + populateNetworkBackwardSoftmaxAndMax( nlr, tableau ); - tableau.setLowerBound( 0, 1 ); - tableau.setUpperBound( 0, 1.00001 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 1.00001 ); - tableau.setLowerBound( 2, 1 ); - tableau.setUpperBound( 2, 1.00001 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + - x0: [1, 1.0001] - x1: [1, 1.0001] - x2: [1, 1.0001] - */ + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( + { Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - List expectedBounds({ Tightening( 3, 2, Tightening::LB ), - Tightening( 3, 2, Tightening::UB ), - Tightening( 4, 3, Tightening::LB ), - Tightening( 4, 3, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), - Tightening( 5, 0, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), - Tightening( 6, -1, Tightening::UB ), - 
Tightening( 7, -2, Tightening::LB ), - Tightening( 7, -2, Tightening::UB ), - Tightening( 8, 0.8668, Tightening::LB ), - Tightening( 8, 0.8668, Tightening::UB ), - Tightening( 9, 0.9820, Tightening::LB ), - Tightening( 9, 0.9820, Tightening::UB ), - Tightening( 10, 0.1173, Tightening::LB ), - Tightening( 10, 0.1173, Tightening::UB ), - Tightening( 11, 0.0179, Tightening::LB ), - Tightening( 11, 0.0179, Tightening::UB ), - Tightening( 12, 0.0159, Tightening::LB ), - Tightening( 12, 0.0159, Tightening::UB ), - Tightening( 13, 0.9470, Tightening::LB ), - Tightening( 13, 0.9470, Tightening::UB ), - Tightening( 14, -0.9470, Tightening::LB ), - Tightening( 14, -0.9470, Tightening::UB ), - Tightening( 15, 1.0253, Tightening::LB ), - Tightening( 15, 1.0253, Tightening::UB ), - Tightening( 16, -1.0253, Tightening::LB ), - Tightening( 16, -1.0253, Tightening::UB ) + Tightening( 3, 0.2119, Tightening::LB ), Tightening( 3, 0.8756, Tightening::UB ), + Tightening( 5, 0.0049, Tightening::LB ), Tightening( 5, 0.6652, Tightening::UB ), + Tightening( 7, 0.0634, Tightening::LB ), Tightening( 7, 0.4955, Tightening::UB ), - } ); + Tightening( 8, -0.1519, Tightening::LB ), Tightening( 8, 1.4775, Tightening::UB ), + Tightening( 9, 0.6614, Tightening::LB ), Tightening( 9, 2.3899, Tightening::UB ), + Tightening( 10, 0.6614, Tightening::LB ), Tightening( 10, 2.3899, Tightening::UB ), + + Tightening( 11, -2.3899, Tightening::LB ), Tightening( 11, -0.6614, Tightening::UB ), + } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_softmax_bounds_er() - { - Vector inputLb = { -1, 0, 1 }; - Vector inputUb = { 0, 2, 4 }; - Vector input = { -0.5, 1, 2.5 }; + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); - double value = NLR::DeepPolySoftmaxElement::ERLowerBound( input, inputLb, inputUb, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 0.0114799, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dERLowerBound( input, inputLb, inputUb, 0, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 0.00563867, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dERLowerBound( input, inputLb, inputUb, 0, 1 ); - TS_ASSERT( FloatUtils::areEqual( value, -0.000838421, 0.00001 ) ); + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large 
); - Vector outputLb = { 0.2, 0, 0 }; - Vector outputUb = { 0.4, 0.1, 0.1 }; + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - value = NLR::DeepPolySoftmaxElement::ERUpperBound( input, outputLb, outputUb, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, -1.44538, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dERUpperBound( input, outputLb, outputUb, 0, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 1.96538, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dERUpperBound( input, outputLb, outputUb, 0, 1 ); - TS_ASSERT( FloatUtils::areEqual( value, -0.358535, 0.00001 ) ); - } + Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), - void test_softmax_bounds_lse1() - { - Vector inputLb = { -1, 0, 1 }; - Vector inputUb = { 0, 2, 3 }; - Vector input = { -0.5, 1, 2 }; - double value = NLR::DeepPolySoftmaxElement::LSELowerBound( input, inputLb, inputUb, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) ); - value = NLR::DeepPolySoftmaxElement::dLSELowerBound( input, inputLb, inputUb, 0, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) ); - value = NLR::DeepPolySoftmaxElement::dLSELowerBound( input, inputLb, inputUb, 0, 1 ); - TS_ASSERT( FloatUtils::areEqual( value, -0.00703444, 0.001 ) ); + Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 9, 0.0674, Tightening::LB ), Tightening( 9, 2.9934, Tightening::UB ), - Vector outputLb = { 0.2, 0, 0 }; - Vector outputUb = { 0.4, 0.1, 0.1 }; - value = NLR::DeepPolySoftmaxElement::LSEUpperBound( input, outputLb, outputUb, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, -0.164165, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dLSEUpperbound( input, outputLb, outputUb, 0, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 0.272204, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dLSEUpperbound( input, outputLb, outputUb, 0, 1 ); - TS_ASSERT( FloatUtils::areEqual( value, -0.073207, 0.00001 ) ); + Tightening( 10, 0.0674, Tightening::LB ), Tightening( 10, 2.9934, Tightening::UB ), + + Tightening( 11, -2.9934, Tightening::LB ), Tightening( 11, -0.0674, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - - void test_sbt_bilinear() + + void test_backwards_softmax_and_max2() { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); - populateNetworkSBTBilinear( nlr, tableau ); + 
populateNetworkBackwardSoftmaxAndMax2( nlr, tableau ); - tableau.setLowerBound( 0, 1 ); - tableau.setUpperBound( 0, 2 ); - tableau.setLowerBound( 1, -2 ); + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [1, 2] - x1: [-2, 1] - - Layer 1: - - x2.lb = x0 - 2x1 : [-1, 6] - x2.ub = x0 - 2x1 : [-1, 6] - x3.lb = x0 + x1 : [-1, 3] - x3.ub = x0 + x1 : [-1, 3] + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - Coefficients for bilinear layer: - Lower bound: - alpha_l = x3.lb = -1 - beta = x2.lb = -1 - gamma_l = -x2.lb * x3.lb = --1 * -1 = -1 + List expectedBounds( + { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - Upper bound: - alpha_u = x3.ub = 3 - beta = x2.lb = -1 - gamma_u = -x2.lb * x3.ub = --1 * 3 = 3 - - x4.lb = -1 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + -1 = -2x0 + x1 - 1 : [-7, -2] - x4.ub = 3 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + 3 = 2x0 - 7x1 + 3 : [0, 21] - x4 range: [-7, 21] - - Layer 3: + Tightening( 7, 0.0003, Tightening::LB ), Tightening( 7, 0.9864, Tightening::UB ), + Tightening( 8, 0.0001, Tightening::LB ), Tightening( 8, 0.9907, Tightening::UB ), + Tightening( 9, 0.0001, Tightening::LB ), Tightening( 9, 0.9907, Tightening::UB ), + Tightening( 10, 0.0001, Tightening::LB ), Tightening( 10, 0.9994, Tightening::UB ), + + Tightening( 11, 1.0013, Tightening::LB ), Tightening( 11, 4.9635, Tightening::UB ), + Tightening( 12, -0.9861, Tightening::LB ), Tightening( 12, 2.9806, Tightening::UB ), + Tightening( 13, -2.9902, Tightening::LB ), Tightening( 13, 0.9678, Tightening::UB ), + + Tightening( 14, 0.1086, Tightening::LB ), Tightening( 14, 0.9971, Tightening::UB ), + Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8766, Tightening::UB ), + Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4595, Tightening::UB ), + + Tightening( 17, 0.1086, Tightening::LB ), Tightening( 17, 0.9971, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); - x7.lb = -1 ( 2x0 - 5x1 + 3 ) = -2x0 + 7x1 - 3 : [-21, 0] - x7.ub = -1 ( -2x0 + 3x1 - 1 ) = 2x0 + x1 + 1 : [2, 7] - x4 range: [-21, 5] - */ + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + 
tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); - List expectedBounds( { Tightening( 2, -1, Tightening::LB ), - Tightening( 2, 6, Tightening::UB ), - Tightening( 3, -1, Tightening::LB ), - Tightening( 3, 3, Tightening::UB ), - Tightening( 4, -7, Tightening::LB ), - Tightening( 4, 21, Tightening::UB ), - Tightening( 5, -21, Tightening::LB ), - Tightening( 5, 7, Tightening::UB ) } ); - List bounds; + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0.0001, Tightening::LB ), Tightening( 7, 0.9999, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 0.9991, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 0.9990, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9999, Tightening::UB ), + + Tightening( 11, 1.0003, Tightening::LB ), Tightening( 11, 4.9989, Tightening::UB ), + Tightening( 12, -0.9999, Tightening::LB ), Tightening( 12, 2.9979, Tightening::UB ), + Tightening( 13, -2.9989, Tightening::LB ), Tightening( 13, 0.9980, Tightening::UB ), + + Tightening( 14, 0.1067, Tightening::LB ), Tightening( 14, 0.9972, Tightening::UB ), + Tightening( 15, 0.0024, Tightening::LB ), Tightening( 15, 0.8786, Tightening::UB ), + Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4677, Tightening::UB ), + + Tightening( 17, 0.1067, Tightening::LB ), Tightening( 17, 0.9972, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - - void test_concretize_input_assignment() + + void test_backwards_relu_and_bilinear() { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( 
Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); + populateNetworkBackwardReluAndBilinear( nlr, tableau ); - populateNetwork( nlr ); - - // With ReLUs, Inputs are zeros, only biases count - tableau.nextValues[0] = 0; - tableau.nextValues[1] = 0; - - Map assignment; - - TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 4 ) ); + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( + { Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - TS_ASSERT( assignment.size() == 14 ); - TS_ASSERT( FloatUtils::areEqual( assignment[12], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( assignment[13], 4 ) ); + Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - // With ReLUs, case 1 - tableau.nextValues[0] = 1; - tableau.nextValues[1] = 1; + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ), - TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); + Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ), + + Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 1 ) ); + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large 
); - TS_ASSERT( FloatUtils::areEqual( assignment[12], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( assignment[13], 1 ) ); - // With ReLUs, case 2 - tableau.nextValues[0] = 1; - tableau.nextValues[1] = 2; + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 0 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 0 ) ); + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), + Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), - TS_ASSERT( FloatUtils::areEqual( assignment[12], 0 ) ); - TS_ASSERT( FloatUtils::areEqual( assignment[13], 0 ) ); + Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), + + Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { Tightening( 7, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - - - void test_obtain_bound_from_ipq() + + void test_backwards_relu_and_bilinear2() { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); + NLR::NetworkLevelReasoner nlr; - populateNetwork( nlr ); - - Query query; - query.setNumberOfVariables( 14 ); - - - // Initialize the bounds - query.setLowerBound( 0, -1 ); - query.setUpperBound( 0, 1 ); - query.setLowerBound( 1, -1 ); - query.setUpperBound( 1, 1 ); - - double large = 1000; - query.setLowerBound( 2, -large ); - query.setUpperBound( 2, large ); - query.setLowerBound( 3, -large ); - query.setUpperBound( 3, large ); - query.setLowerBound( 4, -large ); - query.setUpperBound( 4, large ); - query.setLowerBound( 5, -large ); - query.setUpperBound( 5, large ); - query.setLowerBound( 6, -large ); - query.setUpperBound( 6, large ); - query.setLowerBound( 7, -large ); - query.setUpperBound( 7, large ); - query.setLowerBound( 8, -large ); - query.setUpperBound( 8, large ); - query.setLowerBound( 9, -large ); - query.setUpperBound( 9, large ); - query.setLowerBound( 10, -large ); - query.setUpperBound( 10, large ); - query.setLowerBound( 11, -large ); - query.setUpperBound( 11, large ); - query.setLowerBound( 12, -large ); - query.setUpperBound( 12, large ); - query.setLowerBound( 13, -large ); - query.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds( query ) ); - - // Perform the 
tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReluAndBilinear2( nlr, tableau ); - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( + { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ), + Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ), + + Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + 
tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), - } ); + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - List bounds; + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ), + Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ), + + Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { Tightening( 7, 0, Tightening::LB ), - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : expectedBounds ) - TS_ASSERT( bounds.exists( bound ) ); + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } bool boundsEqual( const List &bounds, const List &expectedBounds ) @@ -7740,4 +12195,21 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite } return allFound; } + + void updateTableau( MockTableau &tableau, List &tightenings ) + { + ASSERT( tableau ); + for ( const auto &tightening : tightenings ) + { + if ( tightening._type == Tightening::LB ) + { + tableau.setLowerBound( tightening._variable, tightening._value ); + } + + if ( tightening._type == Tightening::UB ) + { + tableau.setUpperBound( tightening._variable, tightening._value ); + } + } + } }; From 9ee76092ed7ae52453aecd789be868d59e39aeed Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Tue, 1 Oct 2024 23:29:06 +0300 Subject: [PATCH 13/81] 0-6-0: Adding MILP bound types for two 
backpropagation of bounds algorithms, adjusting in Engine. 0-6-0: Adding MILP bound types for two backpropagation of bounds algorithms, adjusting in Engine. --- src/engine/Engine.cpp | 4 ++++ src/engine/MILPSolverBoundTighteningType.h | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/src/engine/Engine.cpp b/src/engine/Engine.cpp index 35d1ae9f8c..d41ac92dd7 100644 --- a/src/engine/Engine.cpp +++ b/src/engine/Engine.cpp @@ -1590,6 +1590,8 @@ void Engine::performMILPSolverBoundedTightening( Query *inputQuery ) case MILPSolverBoundTighteningType::LP_RELAXATION_INCREMENTAL: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX: _networkLevelReasoner->lpRelaxationPropagation(); break; case MILPSolverBoundTighteningType::MILP_ENCODING: @@ -1681,6 +1683,8 @@ void Engine::performMILPSolverBoundedTighteningForSingleLayer( unsigned targetIn return; case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX: case MILPSolverBoundTighteningType::ITERATIVE_PROPAGATION: case MILPSolverBoundTighteningType::NONE: return; diff --git a/src/engine/MILPSolverBoundTighteningType.h b/src/engine/MILPSolverBoundTighteningType.h index f317639375..ad839706f7 100644 --- a/src/engine/MILPSolverBoundTighteningType.h +++ b/src/engine/MILPSolverBoundTighteningType.h @@ -34,6 +34,10 @@ enum class MILPSolverBoundTighteningType { // Perform backward analysis BACKWARD_ANALYSIS_ONCE = 5, BACKWARD_ANALYSIS_CONVERGE = 6, + // Perform backward analysis using the INVPROP Algorithm (arXiv:2302.01404v4 [cs.LG]) + BACKWARD_ANALYSIS_INVPROP = 7, + // Perform backward analysis using the PreimageApproximation Algorithm (arXiv:2305.03686v4 [cs.SE]) + BACKWARD_ANALYSIS_PREIMAGE_APPROX = 8, // Option to have no MILP bound tightening performed NONE = 10, }; From 2d9b85a36aabb28f0cfce05607e3368e1e86a71e Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Tue, 1 Oct 2024 23:31:54 +0300 Subject: [PATCH 14/81] 0-6-1: Adjusting in Options for two new MILP Bound types. 0-6-1: Adjusting in Options for two new MILP Bound types. --- src/configuration/Options.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/configuration/Options.cpp b/src/configuration/Options.cpp index bf3b9b913a..17d570133f 100644 --- a/src/configuration/Options.cpp +++ b/src/configuration/Options.cpp @@ -207,6 +207,10 @@ MILPSolverBoundTighteningType Options::getMILPSolverBoundTighteningType() const return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE; if ( strategyString == "backward-converge" ) return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE; + if ( strategyString == "backward-invprop" ) + return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP; + if ( strategyString == "backward-preimage-approx" ) + return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX; else if ( strategyString == "milp" ) return MILPSolverBoundTighteningType::MILP_ENCODING; else if ( strategyString == "milp-inc" ) From f5c7d1c25bc3c1d03784d799ac2c9ad7280510bf Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Tue, 1 Oct 2024 23:34:54 +0300 Subject: [PATCH 15/81] 0-6-2: Introducing Polygonal Tightenings (output type of two new algorithms). 
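Both new modes reuse the existing option-string plumbing, so selecting them from code mirrors how the tests earlier in this series pick "backward-converge". A minimal sketch, assuming the Options and MILPSolverBoundTighteningType APIs exactly as in the Options.cpp and MILPSolverBoundTighteningType.h diffs above:

    // Select the INVPROP-based backward analysis; "backward-invprop" is the
    // string parsed by Options::getMILPSolverBoundTighteningType() above.
    Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
                               "backward-invprop" );
    ASSERT( Options::get()->getMILPSolverBoundTighteningType() ==
            MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP );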
0-6-2: Introducing Polygonal Tightenings (output type of two new algorithms).
---
 src/engine/PolygonalTightening.h | 97 ++++++++++++++++++++++++++++++++
 1 file changed, 97 insertions(+)
 create mode 100644 src/engine/PolygonalTightening.h

diff --git a/src/engine/PolygonalTightening.h b/src/engine/PolygonalTightening.h
new file mode 100644
index 0000000000..538ed9cee0
--- /dev/null
+++ b/src/engine/PolygonalTightening.h
@@ -0,0 +1,97 @@
+/********************* */
+/*! \file PolygonalTightening.h
+ ** \verbatim
+ ** Top contributors (to current version):
+ **   Duligur Ibeling, Guy Katz, Ido Shmuel
+ ** This file is part of the Marabou project.
+ ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS
+ ** in the top-level source directory) and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** [[ Add lengthier description here ]]
+
+**/
+
+#ifndef __PolygonalTightening_h__
+#define __PolygonalTightening_h__
+
+#include
+#include "NeuronIndex.h"
+#include "Map.h"
+
+class PolygonalTightening
+{
+public:
+    enum PolygonalBoundType {
+        LB = 0,
+        UB = 1,
+    };
+
+    PolygonalTightening( Map<NLR::NeuronIndex, double> neuronToCoefficient,
+                         double value,
+                         PolygonalBoundType type )
+        : _neuronToCoefficient( neuronToCoefficient )
+        , _value( value )
+        , _type( type )
+    {
+    }
+
+    /*
+      The coefficient of each neuron.
+    */
+    Map<NLR::NeuronIndex, double> _neuronToCoefficient;
+
+    /*
+      Its new value.
+    */
+    double _value;
+
+    /*
+      Whether the tightening tightens the
+      lower bound or the upper bound.
+    */
+    PolygonalBoundType _type;
+
+    /*
+      Equality operator.
+    */
+    bool operator==( const PolygonalTightening &other ) const
+    {
+        bool allFound = true;
+        for ( const auto &pair : _neuronToCoefficient )
+        {
+            bool currentFound = false;
+            for ( const auto &otherPair : other._neuronToCoefficient )
+            {
+                currentFound |= ( pair.first._layer == otherPair.first._layer &&
+                                  pair.first._neuron == otherPair.first._neuron &&
+                                  pair.second == otherPair.second );
+            }
+            allFound &= currentFound;
+        }
+        bool result = allFound && _value == other._value && _type == other._type;
+        return result;
+    }
+
+    void dump() const
+    {
+        printf( "PolygonalTightening: x %s %.2lf\n", _type == LB ? ">=" : "<=", _value );
+
+        for ( const auto &pair : _neuronToCoefficient )
+        {
+            printf( "NeuronIndex: (layer %u, neuron %u), Coefficient: %.2lf\n",
+                    pair.first._layer,
+                    pair.first._neuron,
+                    pair.second );
+        }
+    }
+};
+
+#endif // __PolygonalTightening_h__
+
+//
+// Local Variables:
+// compile-command: "make -C ../.. "
+// tags-file-name: "../../TAGS"
+// c-basic-offset: 4
+// End:
+//

From b363d05e5a6904bd99c7174eff358d87f4153ad2 Mon Sep 17 00:00:00 2001
From: ido-shm-uel
Date: Tue, 1 Oct 2024 23:36:51 +0300
Subject: [PATCH 16/81] 0-6-3: Adjusting in NLR, LayerOwner for Polygonal
 Tightenings.

0-6-3: Adjusting in NLR, LayerOwner for Polygonal Tightenings.
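A PolygonalTightening packages one linear inequality over several neurons: with type LB it asserts that the weighted sum of the listed neurons is at least _value, and with UB that it is at most _value (this is the reading printed by dump() above). A minimal usage sketch, assuming Marabou's Map and NLR::NeuronIndex APIs as used elsewhere in this patch series; the particular indices and constant are illustrative only:

    // Encode the polygonal upper bound  x(1,0) - 2 * x(1,1) <= 3.5.
    Map<NLR::NeuronIndex, double> coeffs;
    coeffs[NLR::NeuronIndex( 1, 0 )] = 1.0;
    coeffs[NLR::NeuronIndex( 1, 1 )] = -2.0;
    PolygonalTightening pt( coeffs, 3.5, PolygonalTightening::UB );
    pt.dump(); // prints "PolygonalTightening: x <= 3.50", then one line per coefficient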
--- src/nlr/LayerOwner.h | 2 ++ src/nlr/NetworkLevelReasoner.cpp | 16 ++++++++++++++++ src/nlr/NetworkLevelReasoner.h | 15 ++++++++++++++- 3 files changed, 32 insertions(+), 1 deletion(-) diff --git a/src/nlr/LayerOwner.h b/src/nlr/LayerOwner.h index 180a4fdebd..122c6a216a 100644 --- a/src/nlr/LayerOwner.h +++ b/src/nlr/LayerOwner.h @@ -18,6 +18,7 @@ #include "ITableau.h" #include "Tightening.h" +#include "PolygonalTightening.h" namespace NLR { @@ -35,6 +36,7 @@ class LayerOwner virtual const ITableau *getTableau() const = 0; virtual unsigned getNumberOfLayers() const = 0; virtual void receiveTighterBound( Tightening tightening ) = 0; + virtual void receivePolygonalTighterBound( PolygonalTightening polygonal_tightening ) = 0; }; } // namespace NLR diff --git a/src/nlr/NetworkLevelReasoner.cpp b/src/nlr/NetworkLevelReasoner.cpp index 88621fcaed..04b9b20683 100644 --- a/src/nlr/NetworkLevelReasoner.cpp +++ b/src/nlr/NetworkLevelReasoner.cpp @@ -198,6 +198,22 @@ void NetworkLevelReasoner::clearConstraintTightenings() _boundTightenings.clear(); } +void NetworkLevelReasoner::receivePolygonalTighterBound( PolygonalTightening polygonal_tightening ) +{ + _polygonalBoundTightenings.append( polygonal_tightening ); +} + +void NetworkLevelReasoner::getConstraintPolygonalTightenings( List &polygonal_tightenings ) +{ + polygonal_tightenings = _polygonalBoundTightenings; + _polygonalBoundTightenings.clear(); +} + +void NetworkLevelReasoner::clearConstraintPolygonalTightenings() +{ + _polygonalBoundTightenings.clear(); +} + void NetworkLevelReasoner::symbolicBoundPropagation() { for ( unsigned i = 0; i < _layerIndexToLayer.size(); ++i ) diff --git a/src/nlr/NetworkLevelReasoner.h b/src/nlr/NetworkLevelReasoner.h index 97af9610f9..fa125f0116 100644 --- a/src/nlr/NetworkLevelReasoner.h +++ b/src/nlr/NetworkLevelReasoner.h @@ -26,6 +26,7 @@ #include "PiecewiseLinearFunctionType.h" #include "Tightening.h" #include "Vector.h" +#include "PolygonalTightening.h" #include @@ -114,6 +115,13 @@ class NetworkLevelReasoner : public LayerOwner - getConstraintTightenings: this is the function that an external user calls in order to collect the tighter bounds discovered by the NLR. + + - receiveTighterPolygonalBound: this is a callback from the layer + objects, through which they report tighter polygonal bounds. + + - getConstraintPolygonalTightenings: this is the function that an + external user calls in order to collect the tighter polygonal bounds + discovered by the NLR. 
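+
+      Unlike a Tightening, which bounds a single variable, a polygonal
+      tightening bounds a linear combination of neurons
+      (see PolygonalTightening.h).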
    */

    void setTableau( const ITableau *tableau );
@@ -133,6 +141,10 @@ class NetworkLevelReasoner : public LayerOwner
     void receiveTighterBound( Tightening tightening );
     void getConstraintTightenings( List<Tightening> &tightenings );
     void clearConstraintTightenings();
+
+    void receivePolygonalTighterBound( PolygonalTightening polygonal_tightening );
+    void getConstraintPolygonalTightenings( List<PolygonalTightening> &polygonal_tightenings );
+    void clearConstraintPolygonalTightenings();

     /*
       For debugging purposes: dump the network topology
@@ -198,8 +210,9 @@ class NetworkLevelReasoner : public LayerOwner
     Map<unsigned, Layer *> _layerIndexToLayer;
     const ITableau *_tableau;

-    // Tightenings discovered by the various layers
+    // Tightenings and Polygonal Tightenings discovered by the various layers
     List<Tightening> _boundTightenings;
+    List<PolygonalTightening> _polygonalBoundTightenings;

     std::unique_ptr<DeepPolyAnalysis> _deepPolyAnalysis;

From 7993b1e08f4ebed031fdda244710c5663e7ea2ba Mon Sep 17 00:00:00 2001
From: ido-shm-uel
Date: Tue, 1 Oct 2024 23:39:14 +0300
Subject: [PATCH 17/81] 0-6-4: Introducing parameterised bounds in LPFormulator
 (two new algorithms will optimize value of parameter).

0-6-4: Introducing parameterised bounds in LPFormulator (two new algorithms
will optimize value of parameter).
---
 src/nlr/LPFormulator.cpp | 986 ++++++++++++++++++++++++++++++++++++++-
 src/nlr/LPFormulator.h   |  47 ++
 2 files changed, 1024 insertions(+), 9 deletions(-)

diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp
index 9341c7188e..8878cbadd0 100644
--- a/src/nlr/LPFormulator.cpp
+++ b/src/nlr/LPFormulator.cpp
@@ -553,7 +553,6 @@ void LPFormulator::optimizeBoundsOfNeuronsWithLpRlaxation( ThreadArgument &args,
     }
 }

-
 void LPFormulator::tightenSingleVariableBoundsWithLPRelaxation( ThreadArgument &argument )
 {
     try
@@ -845,7 +844,6 @@ void LPFormulator::addReluLayerToLpRelaxation( GurobiWrapper &gurobi,
     }
 }

-
 void LPFormulator::addRoundLayerToLpRelaxation( GurobiWrapper &gurobi,
                                                 const Layer *layer,
                                                 bool createVariables )
@@ -910,7 +908,6 @@ void LPFormulator::addRoundLayerToLpRelaxation( GurobiWrapper &gurobi,
     }
 }

-
 void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi,
                                                         const Layer *layer,
                                                         bool createVariables )
@@ -986,7 +983,6 @@ void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi,
     }
 }

-
 void LPFormulator::addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi,
                                                   const Layer *layer,
                                                   bool createVariables )
@@ -1081,8 +1077,6 @@ void LPFormulator::addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi,
     }
 }

-
-
 void LPFormulator::addSignLayerToLpRelaxation( GurobiWrapper &gurobi,
                                                const Layer *layer,
                                                bool createVariables )
@@ -1166,7 +1160,6 @@ void LPFormulator::addSignLayerToLpRelaxation( GurobiWrapper &gurobi,
     }
 }

-
 void LPFormulator::addMaxLayerToLpRelaxation( GurobiWrapper &gurobi,
                                               const Layer *layer,
                                               bool createVariables )
@@ -1248,7 +1241,6 @@ void LPFormulator::addMaxLayerToLpRelaxation( GurobiWrapper &gurobi,
     }
 }

-
 void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi,
                                                   const Layer *layer,
                                                   bool createVariables )
@@ -1508,7 +1500,6 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, const
     }
 }

-
 void LPFormulator::addWeightedSumLayerToLpRelaxation( GurobiWrapper &gurobi,
                                                       const Layer *layer,
                                                       bool createVariables )
@@ -1662,6 +1653,983 @@ void LPFormulator::addLeakyReluLayerToLpRelaxation( GurobiWrapper &gurobi,
     }
 }

+void LPFormulator::addLayerToParameterisedModel( GurobiWrapper &gurobi,
+                                                 const Layer *layer,
+                                                 bool createVariables,
+                                                 double coeff )
+{
+    switch ( layer->getLayerType() )
+    {
+    case
Layer::INPUT: + addInputLayerToParameterisedLpRelaxation( gurobi, layer, coeff ); + break; + + case Layer::RELU: + addReluLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); + break; + + case Layer::WEIGHTED_SUM: + addWeightedSumLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); + break; + + case Layer::ROUND: + addRoundLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); + break; + + case Layer::LEAKY_RELU: + addLeakyReluLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); + break; + + case Layer::ABSOLUTE_VALUE: + addAbsoluteValueLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); + break; + + case Layer::SIGN: + addSignLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); + break; + + case Layer::MAX: + addMaxLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); + break; + + case Layer::SIGMOID: + addSigmoidLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); + break; + + case Layer::SOFTMAX: + addSoftmaxLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); + break; + + case Layer::BILINEAR: + addBilinearLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); + break; + + default: + throw NLRError( NLRError::LAYER_TYPE_NOT_SUPPORTED, "LPFormulator" ); + break; + } +} + +void LPFormulator::addInputLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, double coeff ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + unsigned variable = layer->neuronToVariable( i ); + gurobi.addVariable( Stringf( "x%u", variable ), layer->getLb( i ), layer->getUb( i ) ); + } +} + +void LPFormulator::addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( !layer->neuronEliminated( i ) ) + { + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + unsigned sourceNeuron = sources.begin()->_neuron; + + if ( sourceLayer->neuronEliminated( sourceNeuron ) ) + { + // If the source neuron has been eliminated, this neuron is constant + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + double targetValue = sourceValue > 0 ? 
sourceValue : 0; + + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + + continue; + } + + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + gurobi.addVariable( Stringf( "x%u", targetVariable ), 0, layer->getUb( i ) ); + + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // The ReLU is active, y = x + if ( sourceLb < 0 ) + sourceLb = 0; + + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addEqConstraint( terms, 0 ); + } + else if ( !FloatUtils::isPositive( sourceUb ) ) + { + // The ReLU is inactive, y = 0 + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addEqConstraint( terms, 0 ); + } + else + { + /* + The phase of this ReLU is not yet fixed. + + For y = ReLU(x), we add the following triangular relaxation: + + 1. y >= 0 + 2. y >= coeff * x + 3. y is below the line the crosses (x.lb,0) and (x.ub,x.ub) + */ + + // y >= 0 + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + + // y >= coeff * x, i.e. y - coeff * x >= 0 + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -coeff, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + + /* + u ul + y <= ----- x - ----- + u - l u - l + */ + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -sourceUb / ( sourceUb - sourceLb ), + Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, + ( -sourceUb * sourceLb ) / ( sourceUb - sourceLb ) ); + } + } + } +} + +void LPFormulator::addRoundLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( !layer->neuronEliminated( i ) ) + { + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + unsigned sourceNeuron = sources.begin()->_neuron; + + if ( sourceLayer->neuronEliminated( sourceNeuron ) ) + { + // If the source neuron has been eliminated, this neuron is constant + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + double targetValue = FloatUtils::round(sourceValue); + + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + + continue; + } + + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + double ub = std::max( FloatUtils::round( sourceUb ), layer->getUb( i ) ); + double lb = std::min( FloatUtils::round( sourceLb ), layer->getLb( i ) ); + + gurobi.addVariable( Stringf( 
"x%u", targetVariable ), lb, ub ); + + // If u = l: y = round(u) + if ( FloatUtils::areEqual( sourceUb, sourceLb ) ) + { + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addEqConstraint( terms, ub ); + } + + else + { + List terms; + // y <= x + 0.5, i.e. y - x <= 0.5 + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, 0.5 ); + + // y >= x - 0.5, i.e. y - x >= -0.5 + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, -0.5 ); + } + } + } +} + +void LPFormulator::addAbsoluteValueLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( !layer->neuronEliminated( i ) ) + { + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + unsigned sourceNeuron = sources.begin()->_neuron; + + if ( sourceLayer->neuronEliminated( sourceNeuron ) ) + { + // If the source neuron has been eliminated, this neuron is constant + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + double targetValue = sourceValue > 0 ? sourceValue : -sourceValue; + + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + + continue; + } + + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + double ub = std::min( FloatUtils::max( -sourceLb, sourceUb ), layer->getUb( i ) ); + + gurobi.addVariable(Stringf( "x%u", targetVariable ), 0, ub ); + + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // The AbsoluteValue is active, y = x + if ( sourceLb < 0 ) + sourceLb = 0; + + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addEqConstraint( terms, 0 ); + } + else if ( !FloatUtils::isPositive( sourceUb ) ) + { + // The AbsoluteValue is inactive, y = -x, i.e. y + x = 0 + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addEqConstraint( terms, 0 ); + } + else + { + /* + The phase of this AbsoluteValue is not yet fixed, 0 <= y <= max(-lb, ub). 
+ */ + + // y >= 0 + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + + // y <= max(-lb, ub) + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addLeqConstraint( terms, ub ); + } + } + } +} + +void LPFormulator::addSigmoidLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( !layer->neuronEliminated( i ) ) + { + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + unsigned sourceNeuron = sources.begin()->_neuron; + + if ( sourceLayer->neuronEliminated( sourceNeuron ) ) + { + // If the source neuron has been eliminated, this neuron is constant + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + double targetValue = SigmoidConstraint::sigmoid(sourceValue); + + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + + continue; + } + + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + double ub = std::min( SigmoidConstraint::sigmoid( sourceUb ), layer->getUb( i ) ); + double lb = std::max( SigmoidConstraint::sigmoid( sourceLb ), layer->getLb( i ) ); + + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); + + // If u = l: y = sigmoid(u) + if ( FloatUtils::areEqual( sourceUb, sourceLb ) ) + { + List terms; + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addEqConstraint( terms, ub ); + } + + else + { + List terms; + double lambda = ( ub - lb ) / ( sourceUb - sourceLb ); + double lambdaPrime = std::min( SigmoidConstraint::sigmoidDerivative( sourceLb ), + SigmoidConstraint::sigmoidDerivative( sourceUb ) ); + + // update lower bound + if ( FloatUtils::isPositive( sourceLb ) ) + { + // y >= lambda * (x - l), i.e. y - lambda * x >= lambda * l + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -lambda, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, sourceLb * lambda ); + } + + else + { + // y >= lambda' * (x - l), i.e. y - lambda' * x >= lambda' * l + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -lambdaPrime, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, sourceLb * lambdaPrime ); + } + + // update upper bound + if ( !FloatUtils::isPositive( sourceUb ) ) + { + // y <= lambda * (x - u), i.e. y - lambda * x <= lambda * u + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -lambda, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, sourceUb * lambda ); + } + else + { + // y <= lambda' * (x - u), i.e. 
y - lambda' * x <= lambda' * u + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -lambdaPrime, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, sourceUb * lambdaPrime ); + } + } + } + } +} + +void LPFormulator::addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( layer->neuronEliminated( i ) ) + continue; + + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + unsigned sourceNeuron = sources.begin()->_neuron; + + if ( sourceLayer->neuronEliminated( sourceNeuron ) ) + { + // If the source neuron has been eliminated, this neuron is constant + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + double targetValue = FloatUtils::isNegative( sourceValue ) ? -1 : 1; + + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + + continue; + } + + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // The Sign is positive, y = 1 + gurobi.addVariable( Stringf( "x%u", targetVariable ), 1, 1 ); + } + else if ( FloatUtils::isNegative( sourceUb ) ) + { + // The Sign is negative, y = -1 + gurobi.addVariable( Stringf( "x%u", targetVariable ), -1, -1 ); + } + else + { + /* + The phase of this Sign is not yet fixed. + + For y = Sign(x), we add the following parallelogram relaxation: + + 1. y >= -1 + 2. y <= -1 + 3. y is below the line the crosses (x.lb,-1) and (0,1) + 4. 
y is above the line the crosses (0,-1) and (x.ub,1) + */ + + // -1 <= y <= 1 + gurobi.addVariable( Stringf( "x%u", targetVariable ), -1, 1 ); + + /* + 2 + y <= ----- * (1 - coeff) x + 1 + - l + */ + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( 2.0 / sourceLb * (1 - coeff), Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, 1 ); + + /* + 2 + y >= ----- * (1 - coeff) x - 1 + u + */ + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( + GurobiWrapper::Term( -2.0 / sourceUb * (1 - coeff), Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, -1 ); + } + } +} + +void LPFormulator::addMaxLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( layer->neuronEliminated( i ) ) + continue; + + unsigned targetVariable = layer->neuronToVariable( i ); + gurobi.addVariable( + Stringf( "x%u", targetVariable ), layer->getLb( i ), layer->getUb( i ) ); + + List sources = layer->getActivationSources( i ); + + bool haveFixedSourceValue = false; + double maxFixedSourceValue = FloatUtils::negativeInfinity(); + + double maxConcreteUb = FloatUtils::negativeInfinity(); + + List terms; + + for ( const auto &source : sources ) + { + const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); + unsigned sourceNeuron = source._neuron; + + if ( sourceLayer->neuronEliminated( sourceNeuron ) ) + { + haveFixedSourceValue = true; + double value = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + if ( value > maxFixedSourceValue ) + maxFixedSourceValue = value; + continue; + } + + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + + // Target is at least source: target - source >= 0 + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + + // Find maximal concrete upper bound + if ( sourceUb > maxConcreteUb ) + maxConcreteUb = sourceUb; + } + + if ( haveFixedSourceValue && ( maxConcreteUb < maxFixedSourceValue ) ) + { + // At least one of the sources has a fixed value, + // and this fixed value dominates other sources. 
+ terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addEqConstraint( terms, maxFixedSourceValue ); + } + else + { + // If we have a fixed value, it's a lower bound + if ( haveFixedSourceValue ) + { + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addGeqConstraint( terms, maxFixedSourceValue ); + } + + // Target must be smaller than greatest concrete upper bound + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addLeqConstraint( terms, maxConcreteUb ); + } + } +} + +void LPFormulator::addSoftmaxLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( layer->neuronEliminated( i ) ) + continue; + + Set handledInputNeurons; + List sources = layer->getActivationSources( i ); + + Vector sourceLbs; + Vector sourceUbs; + Vector sourceMids; + Vector targetLbs; + Vector targetUbs; + for ( const auto &source : sources ) + { + const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); + unsigned sourceNeuron = source._neuron; + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + sourceMids.append( ( sourceLb + sourceUb ) / 2 ); + targetLbs.append( layer->getLb( i ) ); + targetUbs.append( layer->getUb( i ) ); + } + + // Find the index of i in the softmax + unsigned index = 0; + for ( const auto &source : sources ) + { + if ( handledInputNeurons.exists( source._neuron ) ) + ++index; + else + { + handledInputNeurons.insert( source._neuron ); + break; + } + } + + double ub = std::min( DeepPolySoftmaxElement::linearUpperBound( sourceLbs, sourceUbs, index ), layer->getUb( i ) ); + double lb = std::max( DeepPolySoftmaxElement::linearLowerBound( sourceLbs, sourceUbs, index ), layer->getLb( i ) ); + + unsigned targetVariable = layer->neuronToVariable( i ); + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); + + double bias; + SoftmaxBoundType boundType = Options::get()->getSoftmaxBoundType(); + + + List terms; + if ( FloatUtils::areEqual( lb, ub ) ) + { + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addEqConstraint( terms, ub ); + } + else + { + // Compute symbolic bound + if ( boundType == SoftmaxBoundType::LOG_SUM_EXP_DECOMPOSITION ) + { + bool useLSE2 = false; + for ( const auto &lb : targetLbs ) + { + if ( lb > GlobalConfiguration::SOFTMAX_LSE2_THRESHOLD ) + useLSE2 = true; + } + unsigned inputIndex = 0; + if ( !useLSE2 ) + { + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + bias = DeepPolySoftmaxElement::LSELowerBound( sourceMids, sourceLbs, sourceUbs, index ); + for ( const auto &source : sources ) + { + const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); + unsigned sourceNeuron = source._neuron; + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double dldj = + 
DeepPolySoftmaxElement::dLSELowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); + bias -= dldj * sourceMids[inputIndex]; + ++inputIndex; + } + gurobi.addGeqConstraint( terms, bias ); + } + else + { + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + bias = DeepPolySoftmaxElement::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, index ); + for ( const auto &source : sources ) + { + const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); + unsigned sourceNeuron = source._neuron; + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double dldj = + DeepPolySoftmaxElement::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); + bias -= dldj * sourceMids[inputIndex]; + ++inputIndex; + } + gurobi.addGeqConstraint( terms, bias ); + } + + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + bias = DeepPolySoftmaxElement::LSEUpperBound( sourceMids, targetLbs, targetUbs, index ); + inputIndex = 0; + for ( const auto &source : sources ) + { + const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); + unsigned sourceNeuron = source._neuron; + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double dudj = + DeepPolySoftmaxElement::dLSEUpperbound( sourceMids, targetLbs, targetUbs, index, inputIndex ); + terms.append( GurobiWrapper::Term( -dudj, Stringf( "x%u", sourceVariable ) ) ); + bias -= dudj * sourceMids[inputIndex]; + ++inputIndex; + } + gurobi.addLeqConstraint( terms, bias ); + } + else if ( boundType == SoftmaxBoundType::EXPONENTIAL_RECIPROCAL_DECOMPOSITION ) + { + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + bias = DeepPolySoftmaxElement::ERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); + unsigned inputIndex = 0; + for ( const auto &source : sources ) + { + const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); + unsigned sourceNeuron = source._neuron; + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double dldj = + DeepPolySoftmaxElement::dERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); + bias -= dldj * sourceMids[inputIndex]; + ++inputIndex; + } + gurobi.addGeqConstraint( terms, bias ); + + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + bias = DeepPolySoftmaxElement::ERUpperBound( sourceMids, targetLbs, targetUbs, index ); + inputIndex = 0; + for ( const auto &source : sources ) + { + const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); + unsigned sourceNeuron = source._neuron; + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double dudj = + DeepPolySoftmaxElement::dERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex ); + terms.append( GurobiWrapper::Term( -dudj, Stringf( "x%u", sourceVariable ) ) ); + bias -= dudj * sourceMids[inputIndex]; + ++inputIndex; + } + gurobi.addLeqConstraint( terms, bias ); + } + } + } +} + +void LPFormulator::addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( 
!layer->neuronEliminated( i ) ) + { + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + + Vector sourceLbs; + Vector sourceUbs; + Vector sourceValues; + Vector sourceNeurons; + unsigned counter = 0; + bool allConstant = true; + for ( const auto &sourceIndex : sources ) + { + unsigned sourceNeuron = sourceIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeuron ) ); + + sourceNeurons.append( sourceNeuron ); + sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + ++counter; + + if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) + { + allConstant = false; + } + else + { + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + sourceValues.append( sourceValue ); + } + ++counter; + } + + if ( allConstant ) + { + // If the both source neurons have been eliminated, this neuron is constant + double targetValue = sourceValues[0] * sourceValues[1]; + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + continue; + } + + double lb = FloatUtils::infinity(); + double ub = FloatUtils::negativeInfinity(); + List values = { sourceLbs[0] * sourceLbs[1], + sourceLbs[0] * sourceUbs[1], + sourceUbs[0] * sourceLbs[1], + sourceUbs[0] * sourceUbs[1] }; + for ( const auto &v : values ) + { + if ( v < lb ) + lb = v; + if ( v > ub ) + ub = v; + } + + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); + + // Lower bound: out >= l_y * x + l_x * y - l_x * l_y + List terms; + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -sourceLbs[1], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); + terms.append( GurobiWrapper::Term( -sourceLbs[0], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); + gurobi.addGeqConstraint( terms, -sourceLbs[0] * sourceLbs[1] ); + + // Upper bound: out <= u_y * x + l_x * y - l_x * u_y + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -sourceUbs[1], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); + terms.append( GurobiWrapper::Term( -sourceLbs[0], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); + gurobi.addLeqConstraint( terms, -sourceLbs[0] * sourceUbs[1] ); + } + } +} + +void LPFormulator::addWeightedSumLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ) +{ + if ( createVariables ) + { + for ( const auto &sourceLayerPair : layer->getSourceLayers() ) + { + const Layer *sourceLayer = _layerOwner->getLayer( sourceLayerPair.first ); + unsigned sourceLayerSize = sourceLayerPair.second; + + for ( unsigned j = 0; j < sourceLayerSize; ++j ) + { + if ( !sourceLayer->neuronEliminated( j ) ) + { + Stringf sourceVariableName( "x%u", sourceLayer->neuronToVariable( j ) ); + if ( !gurobi.containsVariable( sourceVariableName ) ) + { + gurobi.addVariable( + sourceVariableName, 
sourceLayer->getLb( j ), sourceLayer->getUb( j ) ); + } + } + } + } + } + + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( !layer->neuronEliminated( i ) ) + { + unsigned variable = layer->neuronToVariable( i ); + + gurobi.addVariable( Stringf( "x%u", variable ), layer->getLb( i ), layer->getUb( i ) ); + + List terms; + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", variable ) ) ); + + double bias = -layer->getBias( i ); + + for ( const auto &sourceLayerPair : layer->getSourceLayers() ) + { + const Layer *sourceLayer = _layerOwner->getLayer( sourceLayerPair.first ); + unsigned sourceLayerSize = sourceLayerPair.second; + + for ( unsigned j = 0; j < sourceLayerSize; ++j ) + { + double weight = layer->getWeight( sourceLayerPair.first, j, i ); + if ( !sourceLayer->neuronEliminated( j ) ) + { + Stringf sourceVariableName( "x%u", sourceLayer->neuronToVariable( j ) ); + terms.append( GurobiWrapper::Term( weight, sourceVariableName ) ); + } + else + { + bias -= weight * sourceLayer->getEliminatedNeuronValue( j ); + } + } + } + + gurobi.addEqConstraint( terms, bias ); + } + } +} + +void LPFormulator::addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ) +{ + double slope = layer->getAlpha(); + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( !layer->neuronEliminated( i ) ) + { + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + unsigned sourceNeuron = sources.begin()->_neuron; + + if ( sourceLayer->neuronEliminated( sourceNeuron ) ) + { + // If the source neuron has been eliminated, this neuron is constant + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + double targetValue = sourceValue > 0 ? sourceValue : 0; + + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + + continue; + } + + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + gurobi.addVariable( + Stringf( "x%u", targetVariable ), layer->getLb( i ), layer->getUb( i ) ); + + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // The LeakyReLU is active, y = x + + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addEqConstraint( terms, 0 ); + } + else if ( !FloatUtils::isPositive( sourceUb ) ) + { + // The LeakyReLU is inactive, y = ((alpha+1) * coeff - alpha) * x + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( slope - ( slope + 1 ) * coeff, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addEqConstraint( terms, 0 ); + } + else + { + double width = sourceUb - sourceLb; + double coeff = ( sourceUb - slope * sourceLb ) / width; + double bias = ( ( slope - 1 ) * sourceUb * sourceLb ) / width; + + /* + The phase of this LeakyReLU is not yet fixed. + For y = LeakyReLU(x), we add the following triangular relaxation: + 1. y >= ((alpha+1) * coeff - alpha) * x + 2. y >= x + 3. 
y is below the line the crosses (x.lb,0) and (x.ub,x.ub) + */ + + // y >= ((alpha+1) * coeff - alpha) * x + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( slope - ( slope + 1 ) * coeff, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + + // y >= x, i.e. y - x >= 0 + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -coeff, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, bias ); + } + } + } +} + void LPFormulator::setCutoff( double cutoff ) { _cutoffInUse = true; diff --git a/src/nlr/LPFormulator.h b/src/nlr/LPFormulator.h index 135854bf6b..07143618d8 100644 --- a/src/nlr/LPFormulator.h +++ b/src/nlr/LPFormulator.h @@ -20,6 +20,7 @@ #include "LayerOwner.h" #include "Map.h" #include "ParallelSolver.h" +#include "PolygonalTightening.h" #include #include @@ -53,6 +54,8 @@ class LPFormulator : public ParallelSolver */ void optimizeBoundsWithLpRelaxation( const Map &layers, bool backward = false ); + void optimizeBoundsWithInvprop( const Map &layers ); + void optimizeBoundsWithPreimageApproximation( const Map &layers ); void optimizeBoundsOfOneLayerWithLpRelaxation( const Map &layers, unsigned targetIndex ); void optimizeBoundsWithIncrementalLpRelaxation( const Map &layers ); @@ -124,6 +127,50 @@ class LPFormulator : public ParallelSolver bool createVariables ); void optimizeBoundsOfNeuronsWithLpRlaxation( ThreadArgument &args, bool backward ); + + + // Create LP relaxations depending on an external parameter. 
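+ // A single coefficient in [0, 1] is chosen per layer; each parameterised
+ // relaxation below interpolates between two sound bounds as the coefficient
+ // sweeps that interval (e.g. y >= coeff * x for ReLU).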
+ void addLayerToParameterisedModel( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ); + + void + addInputLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, double coeff ); + + void + addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); + + void + addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); + + void + addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); + + void + addMaxLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); + + void + addRoundLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); + + void + addAbsoluteValueLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); + + void + addSigmoidLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); + + void + addSoftmaxLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); + + void + addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); + + void + addWeightedSumLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ); + /* Optimize for the min/max value of variableName with respect to the constraints From c888160d133d59899a56cc22fc7a79b857151dca Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 10 Oct 2024 20:53:12 +0300 Subject: [PATCH 18/81] 0-6-5: Current state of PreimageApproximation + cleanup + corrections. 0-6-5: Current state of PreimageApproximation + cleanup + corrections. 
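For context, a minimal standalone sketch of the boost::math differential-evolution API that the new PreimageApproximation path calls into. The objective toyVolume, its minimiser at 0.7, and the fixed depth of 3 are illustrative stand-ins only; the real objective, LPFormulator::EstimateVolume, is still a stub in this patch.

    #include <boost/math/optimization/differential_evolution.hpp>
    #include <iostream>
    #include <random>
    #include <vector>

    // Illustrative stand-in for LPFormulator::EstimateVolume: any callable
    // mapping a coefficient vector in [0, 1]^depth to a score to minimise.
    double toyVolume( const std::vector<double> &coeffs )
    {
        double score = 0;
        for ( double c : coeffs )
            score += ( c - 0.7 ) * ( c - 0.7 ); // minimised at 0.7 per layer
        return score;
    }

    int main()
    {
        namespace opt = boost::math::optimization;
        auto params = opt::differential_evolution_parameters<std::vector<double>>();
        unsigned depth = 3; // stands in for layers.size()
        params.lower_bounds.resize( depth, 0 );
        params.upper_bounds.resize( depth, 1 );
        params.threads = 1; // single-threaded, as in the patch
        std::mt19937_64 rng( 42 );
        double valueToReach = 0; // stop early if the score ever reaches 0
        std::vector<double> best =
            opt::differential_evolution( toyVolume, params, rng, valueToReach );
        for ( double c : best )
            std::cout << c << ' ';
        std::cout << '\n';
        return 0;
    }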
---
 src/nlr/LPFormulator.cpp | 966 ++++++------------------------
 src/nlr/LPFormulator.h | 39 +-
 src/nlr/Layer.cpp | 958 +++++++++++++++++++++++++-----
 src/nlr/Layer.h | 11 +-
 src/nlr/NetworkLevelReasoner.cpp | 12 +
 src/nlr/NetworkLevelReasoner.h | 9 +-
 6 files changed, 1005 insertions(+), 990 deletions(-)

diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp
index 8878cbadd0..6876586a57 100644
--- a/src/nlr/LPFormulator.cpp
+++ b/src/nlr/LPFormulator.cpp
@@ -15,13 +15,13 @@

 #include "LPFormulator.h"

+#include "DeepPolySoftmaxElement.h"
 #include "GurobiWrapper.h"
 #include "InfeasibleQueryException.h"
 #include "Layer.h"
 #include "MStringf.h"
 #include "NLRError.h"
 #include "Options.h"
-#include "DeepPolySoftmaxElement.h"
 #include "TimeUtils.h"
 #include "Vector.h"

@@ -249,7 +249,8 @@ void LPFormulator::optimizeBoundsWithIncrementalLpRelaxation( const Map
 &layers,
- bool backward )
+ bool backward,
+ std::vector coeffs )
 {
 unsigned numberOfWorkers = Options::get()->getInt( Options::NUM_WORKERS );

@@ -302,7 +303,7 @@ void LPFormulator::optimizeBoundsWithLpRelaxation( const Map
 &solverToIndex );

 // optimize every neuron of layer
- optimizeBoundsOfNeuronsWithLpRlaxation( argument, backward );
+ optimizeBoundsOfNeuronsWithLpRelaxation( argument, backward, coeffs );

 LPFormulator_LOG( Stringf( "Tightening bound for layer %u - done", layerIndex ).ascii() );
 }
@@ -336,6 +337,40 @@ void LPFormulator::optimizeBoundsWithLpRelaxation( const Map
 throw InfeasibleQueryException();
 }

+void LPFormulator::optimizeBoundsWithInvprop( const Map &layers )
+{
+ Map coeffs;
+ for ( const auto &pair : layers )
+ {
+ //coeffs.insert( pair.first, GlobalConfiguration::INVPROP_INITIAL_ALPHA );
+ coeffs.insert( pair.first, 0.5 );
+ }
+}
+
+void LPFormulator::optimizeBoundsWithPreimageApproximation( const Map &layers )
+{
+ // Define EstimateVolume bind which captures layers and only receives coeffs as its input.
+ auto EstimateVolumeBind = std::bind( EstimateVolume, layers, std::placeholders::_1 );
+
+ // Search over coeffs in [0, 1]^depth. Enforce single-threaded optimization.
+ auto opt_params = boost::math::optimization::differential_evolution_parameters<std::vector<double>>();
+ unsigned depth = layers.size();
+ opt_params.lower_bounds.resize( depth, 0 );
+ opt_params.upper_bounds.resize( depth, 1 );
+ opt_params.threads = 1;
+ double value_to_reach = 0;
+ std::mt19937_64 rng( GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED );
+
+ // Find optimal coeffs and run parameterised backward LP relaxation.
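+ // NB: EstimateVolume below is currently a stub returning 0, which already
+ // meets value_to_reach, so for now the search is expected to stop right
+ // away; this patch puts the wiring in place for a real volume estimate.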
+ auto optimal_coeffs = boost::math::optimization::differential_evolution( EstimateVolumeBind, opt_params, rng, value_to_reach ); + optimizeBoundsWithLpRelaxation( layers, true, optimal_coeffs ); +} + +double LPFormulator::EstimateVolume( const Map &layers, std::vector coeffs ) +{ + return 0; +} + void LPFormulator::optimizeBoundsOfOneLayerWithLpRelaxation( const Map &layers, unsigned targetIndex ) { @@ -384,7 +419,7 @@ void LPFormulator::optimizeBoundsOfOneLayerWithLpRelaxation( const Map coeffs ) { unsigned numberOfWorkers = Options::get()->getInt( Options::NUM_WORKERS ); @@ -522,9 +557,9 @@ void LPFormulator::optimizeBoundsOfNeuronsWithLpRlaxation( ThreadArgument &args, mtx.lock(); if ( backward ) - createLPRelaxationAfter( layers, *freeSolver, lastIndexOfRelaxation ); + createLPRelaxationAfter( layers, *freeSolver, lastIndexOfRelaxation, coeffs ); else - createLPRelaxation( layers, *freeSolver, lastIndexOfRelaxation ); + createLPRelaxation( layers, *freeSolver, lastIndexOfRelaxation, coeffs ); mtx.unlock(); // spawn a thread to tighten the bounds for the current variable @@ -645,20 +680,29 @@ void LPFormulator::tightenSingleVariableBoundsWithLPRelaxation( ThreadArgument & void LPFormulator::createLPRelaxation( const Map &layers, GurobiWrapper &gurobi, - unsigned lastLayer ) + unsigned lastLayer, + std::vector coeffs ) { for ( const auto &layer : layers ) { - if ( layer.second->getLayerIndex() > lastLayer ) + unsigned currentLayerIndex = layer.second->getLayerIndex(); + if ( currentLayerIndex > lastLayer ) continue; - addLayerToModel( gurobi, layer.second, false ); + if ( coeffs.empty() ) + addLayerToModel( gurobi, layer.second, false ); + else + { + double coeff = coeffs[currentLayerIndex]; + addLayerToParameterisedModel( gurobi, layer.second, false, coeff ); + } } } void LPFormulator::createLPRelaxationAfter( const Map &layers, GurobiWrapper &gurobi, - unsigned firstLayer ) + unsigned firstLayer, + std::vector coeffs ) { unsigned depth = GlobalConfiguration::BACKWARD_BOUND_PROPAGATION_DEPTH; std::priority_queue, std::greater> layersToAdd; @@ -676,7 +720,14 @@ void LPFormulator::createLPRelaxationAfter( const Map &layers continue; else { - addLayerToModel( gurobi, currentLayer, true ); + if ( coeffs.empty() ) + addLayerToModel( gurobi, currentLayer, true ); + else + { + double coeff = coeffs[currentLayerIndex]; + addLayerToParameterisedModel( gurobi, currentLayer, true, coeff ); + } + for ( const auto &nextLayer : currentLayer->getSuccessorLayers() ) { if ( layerToDepth.exists( nextLayer ) ) @@ -1203,7 +1254,6 @@ void LPFormulator::addMaxLayerToLpRelaxation( GurobiWrapper &gurobi, if ( createVariables && !gurobi.containsVariable( sourceName ) ) gurobi.addVariable( sourceName, sourceLb, sourceUb ); - // Target is at least source: target - source >= 0 terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); @@ -1298,7 +1348,6 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, double bias; SoftmaxBoundType boundType = Options::get()->getSoftmaxBoundType(); - List terms; if ( FloatUtils::areEqual( lb, ub ) ) { @@ -1621,7 +1670,7 @@ void LPFormulator::addLeakyReluLayerToLpRelaxation( GurobiWrapper &gurobi, else { double width = sourceUb - sourceLb; - double coeff = ( sourceUb - slope * sourceLb ) / width; + double weight = ( sourceUb - slope * sourceLb ) / width; double bias = ( ( slope - 1 ) * sourceUb * sourceLb ) / width; /* @@ -1646,7 +1695,7 @@ void LPFormulator::addLeakyReluLayerToLpRelaxation( GurobiWrapper &gurobi, 
terms.clear();
 terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
- terms.append( GurobiWrapper::Term( -coeff, Stringf( "x%u", sourceVariable ) ) );
+ terms.append( GurobiWrapper::Term( -weight, Stringf( "x%u", sourceVariable ) ) );
 gurobi.addLeqConstraint( terms, bias );
 }
 }
@@ -1660,65 +1709,25 @@ void LPFormulator::addLayerToParameterisedModel( GurobiWrapper &gurobi,
 {
 switch ( layer->getLayerType() )
 {
- case Layer::INPUT:
- addInputLayerToParameterisedLpRelaxation( gurobi, layer, coeff );
- break;
-
+
 case Layer::RELU:
 addReluLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff );
 break;

- case Layer::WEIGHTED_SUM:
- addWeightedSumLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff );
- break;
-
- case Layer::ROUND:
- addRoundLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff );
- break;
-
 case Layer::LEAKY_RELU:
 addLeakyReluLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff );
 break;

- case Layer::ABSOLUTE_VALUE:
- addAbsoluteValueLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff );
- break;
-
 case Layer::SIGN:
 addSignLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff );
 break;
-
- case Layer::MAX:
- addMaxLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff );
- break;
-
- case Layer::SIGMOID:
- addSigmoidLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff );
- break;
-
 case Layer::SOFTMAX:
- addSoftmaxLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff );
- break;
-
- case Layer::BILINEAR:
- addBilinearLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff );
- break;
-
 default:
- throw NLRError( NLRError::LAYER_TYPE_NOT_SUPPORTED, "LPFormulator" );
+ addLayerToModel( gurobi, layer, createVariables );
 break;
 }
 }

-void LPFormulator::addInputLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, double coeff )
-{
- for ( unsigned i = 0; i < layer->getSize(); ++i )
- {
- unsigned variable = layer->neuronToVariable( i );
- gurobi.addVariable( Stringf( "x%u", variable ), layer->getLb( i ), layer->getUb( i ) );
- }
-}
-
 void LPFormulator::addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi,
 const Layer *layer,
 bool createVariables,
@@ -1789,7 +1798,7 @@ void LPFormulator::addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurob
 terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
 gurobi.addGeqConstraint( terms, 0 );

- // y >= coeff * x, i.e. y - coeff * x >= 0
+ // y >= coeff * x, i.e. y - coeff * x >= 0 (varies continuously between y >= 0 and y >= x).
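+ // e.g. with l = -1, u = 1 and coeff = 0.5 this contributes y >= 0 and
+ // y >= 0.5 * x, alongside the secant y <= 0.5 * x + 0.5 added below.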
terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( -coeff, Stringf( "x%u", sourceVariable ) ) ); @@ -1811,76 +1820,96 @@ void LPFormulator::addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurob } } -void LPFormulator::addRoundLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables, - double coeff ) +void LPFormulator::addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ) { for ( unsigned i = 0; i < layer->getSize(); ++i ) { - if ( !layer->neuronEliminated( i ) ) + if ( layer->neuronEliminated( i ) ) + continue; + + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + unsigned sourceNeuron = sources.begin()->_neuron; + + if ( sourceLayer->neuronEliminated( sourceNeuron ) ) { - unsigned targetVariable = layer->neuronToVariable( i ); + // If the source neuron has been eliminated, this neuron is constant + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + double targetValue = FloatUtils::isNegative( sourceValue ) ? -1 : 1; - List sources = layer->getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - unsigned sourceNeuron = sources.begin()->_neuron; + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); - if ( sourceLayer->neuronEliminated( sourceNeuron ) ) - { - // If the source neuron has been eliminated, this neuron is constant - double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - double targetValue = FloatUtils::round(sourceValue); + continue; + } - gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); - continue; - } + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // The Sign is positive, y = 1 + gurobi.addVariable( Stringf( "x%u", targetVariable ), 1, 1 ); + } + else if ( FloatUtils::isNegative( sourceUb ) ) + { + // The Sign is negative, y = -1 + gurobi.addVariable( Stringf( "x%u", targetVariable ), -1, -1 ); + } + else + { + /* + The phase of this Sign is not yet fixed. 
- unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron );
- double sourceLb = sourceLayer->getLb( sourceNeuron );
- double sourceUb = sourceLayer->getUb( sourceNeuron );
- String sourceName = Stringf( "x%u", sourceVariable );
- if ( createVariables && !gurobi.containsVariable( sourceName ) )
- gurobi.addVariable( sourceName, sourceLb, sourceUb );
+ For y = Sign(x), we add the following parallelogram relaxation:

- double ub = std::max( FloatUtils::round( sourceUb ), layer->getUb( i ) );
- double lb = std::min( FloatUtils::round( sourceLb ), layer->getLb( i ) );
-
- gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub );
-
- // If u = l: y = round(u)
- if ( FloatUtils::areEqual( sourceUb, sourceLb ) )
- {
- List terms;
- terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
- gurobi.addEqConstraint( terms, ub );
- }
-
- else
- {
- List terms;
- // y <= x + 0.5, i.e. y - x <= 0.5
- terms.clear();
- terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
- terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) );
- gurobi.addLeqConstraint( terms, 0.5 );
-
- // y >= x - 0.5, i.e. y - x >= -0.5
- terms.clear();
- terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
- terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) );
- gurobi.addGeqConstraint( terms, -0.5 );
- }
+ 1. y >= -1
+ 2. y <= 1
+ 3. y is below the line that crosses (x.lb,-1) and (0,1)
+ 4. y is above the line that crosses (0,-1) and (x.ub,1)
+ */
+
+ // -1 <= y <= 1
+ gurobi.addVariable( Stringf( "x%u", targetVariable ), -1, 1 );
+
+ /*
+ 2
+ y <= ----- * (1 - coeff) x + 1 (varies continuously between y <= 1 and y <= -2 / l * x + 1).
+ - l
+ */
+ List terms;
+ terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+ terms.append( GurobiWrapper::Term( 2.0 / sourceLb * (1 - coeff), Stringf( "x%u", sourceVariable ) ) );
+ gurobi.addLeqConstraint( terms, 1 );
+
+ /*
+ 2
+ y >= ----- * (1 - coeff) x - 1 (varies continuously between y >= -1 and y >= 2 / u * x - 1).
+ u
+ */
+ terms.clear();
+ terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+ terms.append(
+ GurobiWrapper::Term( -2.0 / sourceUb * (1 - coeff), Stringf( "x%u", sourceVariable ) ) );
+ gurobi.addGeqConstraint( terms, -1 );
 }
 }
 }

-void LPFormulator::addAbsoluteValueLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi,
- const Layer *layer,
- bool createVariables,
- double coeff )
+void LPFormulator::addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi,
+ const Layer *layer,
+ bool createVariables,
+ double coeff )
 {
+ double slope = layer->getAlpha();
 for ( unsigned i = 0; i < layer->getSize(); ++i )
 {
 if ( !layer->neuronEliminated( i ) )
@@ -1895,7 +1924,7 @@ void LPFormulator::addAbsoluteValueLayerToParameterisedLpRelaxation( GurobiWrapp
 {
 // If the source neuron has been eliminated, this neuron is constant
 double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron );
- double targetValue = sourceValue > 0 ? sourceValue : -sourceValue;
+ double targetValue = sourceValue > 0 ?
sourceValue : 0;

 gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue );

@@ -1905,19 +1934,17 @@ void LPFormulator::addAbsoluteValueLayerToParameterisedLpRelaxation( GurobiWrapp
 unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron );
 double sourceLb = sourceLayer->getLb( sourceNeuron );
 double sourceUb = sourceLayer->getUb( sourceNeuron );
+
 String sourceName = Stringf( "x%u", sourceVariable );
 if ( createVariables && !gurobi.containsVariable( sourceName ) )
 gurobi.addVariable( sourceName, sourceLb, sourceUb );
-
- double ub = std::min( FloatUtils::max( -sourceLb, sourceUb ), layer->getUb( i ) );

- gurobi.addVariable(Stringf( "x%u", targetVariable ), 0, ub );
+ gurobi.addVariable(
+ Stringf( "x%u", targetVariable ), layer->getLb( i ), layer->getUb( i ) );

 if ( !FloatUtils::isNegative( sourceLb ) )
 {
- // The AbsoluteValue is active, y = x
- if ( sourceLb < 0 )
- sourceLb = 0;
+ // The LeakyReLU is active, y = x

 List terms;
 terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
@@ -1926,704 +1953,41 @@ void LPFormulator::addAbsoluteValueLayerToParameterisedLpRelaxation( GurobiWrapp
 }
 else if ( !FloatUtils::isPositive( sourceUb ) )
 {
- // The AbsoluteValue is inactive, y = -x, i.e. y + x = 0
+ // The LeakyReLU is inactive, y = alpha * x
 List terms;
 terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
- terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", sourceVariable ) ) );
+ terms.append( GurobiWrapper::Term( slope, Stringf( "x%u", sourceVariable ) ) );
 gurobi.addEqConstraint( terms, 0 );
 }
 else
 {
+ double width = sourceUb - sourceLb;
+ double weight = ( sourceUb - slope * sourceLb ) / width;
+ double bias = ( ( slope - 1 ) * sourceUb * sourceLb ) / width;
+
 /*
- The phase of this AbsoluteValue is not yet fixed, 0 <= y <= max(-lb, ub).
+ The phase of this LeakyReLU is not yet fixed.
+ For y = LeakyReLU(x), we add the following triangular relaxation:
+ 1. y >= ((1 - alpha) * coeff + alpha) * x (varies continuously between y >= alpha * x and y >= x).
+ 2. y >= x
+ 3. y is below the line that crosses (x.lb,0) and (x.ub,x.ub)
 */

- // y >= 0
+ // y >= ( alpha + ( 1 - alpha ) * coeff ) * x
 List terms;
 terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+ terms.append( GurobiWrapper::Term( -slope - ( 1 - slope ) * coeff, Stringf( "x%u", sourceVariable ) ) );
 gurobi.addGeqConstraint( terms, 0 );

- // y <= max(-lb, ub)
+ // y >= x, i.e.
y - x >= 0 terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - gurobi.addLeqConstraint( terms, ub ); - } - } - } -} - -void LPFormulator::addSigmoidLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables, - double coeff ) -{ - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( !layer->neuronEliminated( i ) ) - { - unsigned targetVariable = layer->neuronToVariable( i ); - - List sources = layer->getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - unsigned sourceNeuron = sources.begin()->_neuron; - - if ( sourceLayer->neuronEliminated( sourceNeuron ) ) - { - // If the source neuron has been eliminated, this neuron is constant - double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - double targetValue = SigmoidConstraint::sigmoid(sourceValue); - - gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); - - continue; - } + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - String sourceName = Stringf( "x%u", sourceVariable ); - if ( createVariables && !gurobi.containsVariable( sourceName ) ) - gurobi.addVariable( sourceName, sourceLb, sourceUb ); - - double ub = std::min( SigmoidConstraint::sigmoid( sourceUb ), layer->getUb( i ) ); - double lb = std::max( SigmoidConstraint::sigmoid( sourceLb ), layer->getLb( i ) ); - - gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); - - // If u = l: y = sigmoid(u) - if ( FloatUtils::areEqual( sourceUb, sourceLb ) ) - { - List terms; terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - gurobi.addEqConstraint( terms, ub ); - } - - else - { - List terms; - double lambda = ( ub - lb ) / ( sourceUb - sourceLb ); - double lambdaPrime = std::min( SigmoidConstraint::sigmoidDerivative( sourceLb ), - SigmoidConstraint::sigmoidDerivative( sourceUb ) ); - - // update lower bound - if ( FloatUtils::isPositive( sourceLb ) ) - { - // y >= lambda * (x - l), i.e. y - lambda * x >= lambda * l - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -lambda, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, sourceLb * lambda ); - } - - else - { - // y >= lambda' * (x - l), i.e. y - lambda' * x >= lambda' * l - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -lambdaPrime, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, sourceLb * lambdaPrime ); - } - - // update upper bound - if ( !FloatUtils::isPositive( sourceUb ) ) - { - // y <= lambda * (x - u), i.e. y - lambda * x <= lambda * u - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -lambda, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addLeqConstraint( terms, sourceUb * lambda ); - } - else - { - // y <= lambda' * (x - u), i.e. 
y - lambda' * x <= lambda' * u - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -lambdaPrime, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addLeqConstraint( terms, sourceUb * lambdaPrime ); - } - } - } - } -} - -void LPFormulator::addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables, - double coeff ) -{ - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( layer->neuronEliminated( i ) ) - continue; - - unsigned targetVariable = layer->neuronToVariable( i ); - - List sources = layer->getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - unsigned sourceNeuron = sources.begin()->_neuron; - - if ( sourceLayer->neuronEliminated( sourceNeuron ) ) - { - // If the source neuron has been eliminated, this neuron is constant - double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - double targetValue = FloatUtils::isNegative( sourceValue ) ? -1 : 1; - - gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); - - continue; - } - - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - String sourceName = Stringf( "x%u", sourceVariable ); - if ( createVariables && !gurobi.containsVariable( sourceName ) ) - gurobi.addVariable( sourceName, sourceLb, sourceUb ); - - if ( !FloatUtils::isNegative( sourceLb ) ) - { - // The Sign is positive, y = 1 - gurobi.addVariable( Stringf( "x%u", targetVariable ), 1, 1 ); - } - else if ( FloatUtils::isNegative( sourceUb ) ) - { - // The Sign is negative, y = -1 - gurobi.addVariable( Stringf( "x%u", targetVariable ), -1, -1 ); - } - else - { - /* - The phase of this Sign is not yet fixed. - - For y = Sign(x), we add the following parallelogram relaxation: - - 1. y >= -1 - 2. y <= -1 - 3. y is below the line the crosses (x.lb,-1) and (0,1) - 4. 
y is above the line the crosses (0,-1) and (x.ub,1) - */ - - // -1 <= y <= 1 - gurobi.addVariable( Stringf( "x%u", targetVariable ), -1, 1 ); - - /* - 2 - y <= ----- * (1 - coeff) x + 1 - - l - */ - List terms; - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( 2.0 / sourceLb * (1 - coeff), Stringf( "x%u", sourceVariable ) ) ); - gurobi.addLeqConstraint( terms, 1 ); - - /* - 2 - y >= ----- * (1 - coeff) x - 1 - u - */ - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( - GurobiWrapper::Term( -2.0 / sourceUb * (1 - coeff), Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, -1 ); - } - } -} - -void LPFormulator::addMaxLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables, - double coeff ) -{ - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( layer->neuronEliminated( i ) ) - continue; - - unsigned targetVariable = layer->neuronToVariable( i ); - gurobi.addVariable( - Stringf( "x%u", targetVariable ), layer->getLb( i ), layer->getUb( i ) ); - - List sources = layer->getActivationSources( i ); - - bool haveFixedSourceValue = false; - double maxFixedSourceValue = FloatUtils::negativeInfinity(); - - double maxConcreteUb = FloatUtils::negativeInfinity(); - - List terms; - - for ( const auto &source : sources ) - { - const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); - unsigned sourceNeuron = source._neuron; - - if ( sourceLayer->neuronEliminated( sourceNeuron ) ) - { - haveFixedSourceValue = true; - double value = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - if ( value > maxFixedSourceValue ) - maxFixedSourceValue = value; - continue; - } - - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - String sourceName = Stringf( "x%u", sourceVariable ); - if ( createVariables && !gurobi.containsVariable( sourceName ) ) - gurobi.addVariable( sourceName, sourceLb, sourceUb ); - - - // Target is at least source: target - source >= 0 - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, 0 ); - - // Find maximal concrete upper bound - if ( sourceUb > maxConcreteUb ) - maxConcreteUb = sourceUb; - } - - if ( haveFixedSourceValue && ( maxConcreteUb < maxFixedSourceValue ) ) - { - // At least one of the sources has a fixed value, - // and this fixed value dominates other sources. 
- terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - gurobi.addEqConstraint( terms, maxFixedSourceValue ); - } - else - { - // If we have a fixed value, it's a lower bound - if ( haveFixedSourceValue ) - { - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - gurobi.addGeqConstraint( terms, maxFixedSourceValue ); - } - - // Target must be smaller than greatest concrete upper bound - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - gurobi.addLeqConstraint( terms, maxConcreteUb ); - } - } -} - -void LPFormulator::addSoftmaxLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables, - double coeff ) -{ - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( layer->neuronEliminated( i ) ) - continue; - - Set handledInputNeurons; - List sources = layer->getActivationSources( i ); - - Vector sourceLbs; - Vector sourceUbs; - Vector sourceMids; - Vector targetLbs; - Vector targetUbs; - for ( const auto &source : sources ) - { - const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); - unsigned sourceNeuron = source._neuron; - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - String sourceName = Stringf( "x%u", sourceVariable ); - if ( createVariables && !gurobi.containsVariable( sourceName ) ) - gurobi.addVariable( sourceName, sourceLb, sourceUb ); - - sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); - sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); - sourceMids.append( ( sourceLb + sourceUb ) / 2 ); - targetLbs.append( layer->getLb( i ) ); - targetUbs.append( layer->getUb( i ) ); - } - - // Find the index of i in the softmax - unsigned index = 0; - for ( const auto &source : sources ) - { - if ( handledInputNeurons.exists( source._neuron ) ) - ++index; - else - { - handledInputNeurons.insert( source._neuron ); - break; - } - } - - double ub = std::min( DeepPolySoftmaxElement::linearUpperBound( sourceLbs, sourceUbs, index ), layer->getUb( i ) ); - double lb = std::max( DeepPolySoftmaxElement::linearLowerBound( sourceLbs, sourceUbs, index ), layer->getLb( i ) ); - - unsigned targetVariable = layer->neuronToVariable( i ); - gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); - - double bias; - SoftmaxBoundType boundType = Options::get()->getSoftmaxBoundType(); - - - List terms; - if ( FloatUtils::areEqual( lb, ub ) ) - { - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - gurobi.addEqConstraint( terms, ub ); - } - else - { - // Compute symbolic bound - if ( boundType == SoftmaxBoundType::LOG_SUM_EXP_DECOMPOSITION ) - { - bool useLSE2 = false; - for ( const auto &lb : targetLbs ) - { - if ( lb > GlobalConfiguration::SOFTMAX_LSE2_THRESHOLD ) - useLSE2 = true; - } - unsigned inputIndex = 0; - if ( !useLSE2 ) - { - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = DeepPolySoftmaxElement::LSELowerBound( sourceMids, sourceLbs, sourceUbs, index ); - for ( const auto &source : sources ) - { - const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); - unsigned sourceNeuron = source._neuron; - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dldj = - 
DeepPolySoftmaxElement::dLSELowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); - bias -= dldj * sourceMids[inputIndex]; - ++inputIndex; - } - gurobi.addGeqConstraint( terms, bias ); - } - else - { - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = DeepPolySoftmaxElement::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, index ); - for ( const auto &source : sources ) - { - const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); - unsigned sourceNeuron = source._neuron; - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dldj = - DeepPolySoftmaxElement::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); - bias -= dldj * sourceMids[inputIndex]; - ++inputIndex; - } - gurobi.addGeqConstraint( terms, bias ); - } - - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = DeepPolySoftmaxElement::LSEUpperBound( sourceMids, targetLbs, targetUbs, index ); - inputIndex = 0; - for ( const auto &source : sources ) - { - const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); - unsigned sourceNeuron = source._neuron; - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dudj = - DeepPolySoftmaxElement::dLSEUpperbound( sourceMids, targetLbs, targetUbs, index, inputIndex ); - terms.append( GurobiWrapper::Term( -dudj, Stringf( "x%u", sourceVariable ) ) ); - bias -= dudj * sourceMids[inputIndex]; - ++inputIndex; - } - gurobi.addLeqConstraint( terms, bias ); - } - else if ( boundType == SoftmaxBoundType::EXPONENTIAL_RECIPROCAL_DECOMPOSITION ) - { - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = DeepPolySoftmaxElement::ERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); - unsigned inputIndex = 0; - for ( const auto &source : sources ) - { - const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); - unsigned sourceNeuron = source._neuron; - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dldj = - DeepPolySoftmaxElement::dERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); - bias -= dldj * sourceMids[inputIndex]; - ++inputIndex; - } - gurobi.addGeqConstraint( terms, bias ); - - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = DeepPolySoftmaxElement::ERUpperBound( sourceMids, targetLbs, targetUbs, index ); - inputIndex = 0; - for ( const auto &source : sources ) - { - const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); - unsigned sourceNeuron = source._neuron; - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dudj = - DeepPolySoftmaxElement::dERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex ); - terms.append( GurobiWrapper::Term( -dudj, Stringf( "x%u", sourceVariable ) ) ); - bias -= dudj * sourceMids[inputIndex]; - ++inputIndex; - } - gurobi.addLeqConstraint( terms, bias ); - } - } - } -} - -void LPFormulator::addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables, - double coeff ) -{ - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( 
!layer->neuronEliminated( i ) ) - { - unsigned targetVariable = layer->neuronToVariable( i ); - - List sources = layer->getActivationSources( i ); - - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - - Vector sourceLbs; - Vector sourceUbs; - Vector sourceValues; - Vector sourceNeurons; - unsigned counter = 0; - bool allConstant = true; - for ( const auto &sourceIndex : sources ) - { - unsigned sourceNeuron = sourceIndex._neuron; - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - String sourceName = Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeuron ) ); - - sourceNeurons.append( sourceNeuron ); - sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); - sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); - - if ( createVariables && !gurobi.containsVariable( sourceName ) ) - gurobi.addVariable( sourceName, sourceLb, sourceUb ); - ++counter; - - if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) - { - allConstant = false; - } - else - { - double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - sourceValues.append( sourceValue ); - } - ++counter; - } - - if ( allConstant ) - { - // If the both source neurons have been eliminated, this neuron is constant - double targetValue = sourceValues[0] * sourceValues[1]; - gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); - continue; - } - - double lb = FloatUtils::infinity(); - double ub = FloatUtils::negativeInfinity(); - List values = { sourceLbs[0] * sourceLbs[1], - sourceLbs[0] * sourceUbs[1], - sourceUbs[0] * sourceLbs[1], - sourceUbs[0] * sourceUbs[1] }; - for ( const auto &v : values ) - { - if ( v < lb ) - lb = v; - if ( v > ub ) - ub = v; - } - - gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); - - // Lower bound: out >= l_y * x + l_x * y - l_x * l_y - List terms; - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -sourceLbs[1], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); - terms.append( GurobiWrapper::Term( -sourceLbs[0], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); - gurobi.addGeqConstraint( terms, -sourceLbs[0] * sourceLbs[1] ); - - // Upper bound: out <= u_y * x + l_x * y - l_x * u_y - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -sourceUbs[1], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); - terms.append( GurobiWrapper::Term( -sourceLbs[0], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); - gurobi.addLeqConstraint( terms, -sourceLbs[0] * sourceUbs[1] ); - } - } -} - -void LPFormulator::addWeightedSumLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables, - double coeff ) -{ - if ( createVariables ) - { - for ( const auto &sourceLayerPair : layer->getSourceLayers() ) - { - const Layer *sourceLayer = _layerOwner->getLayer( sourceLayerPair.first ); - unsigned sourceLayerSize = sourceLayerPair.second; - - for ( unsigned j = 0; j < sourceLayerSize; ++j ) - { - if ( !sourceLayer->neuronEliminated( j ) ) - { - Stringf sourceVariableName( "x%u", sourceLayer->neuronToVariable( j ) ); - if ( !gurobi.containsVariable( sourceVariableName ) ) - { - gurobi.addVariable( - sourceVariableName, 
sourceLayer->getLb( j ), sourceLayer->getUb( j ) ); - } - } - } - } - } - - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( !layer->neuronEliminated( i ) ) - { - unsigned variable = layer->neuronToVariable( i ); - - gurobi.addVariable( Stringf( "x%u", variable ), layer->getLb( i ), layer->getUb( i ) ); - - List terms; - terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", variable ) ) ); - - double bias = -layer->getBias( i ); - - for ( const auto &sourceLayerPair : layer->getSourceLayers() ) - { - const Layer *sourceLayer = _layerOwner->getLayer( sourceLayerPair.first ); - unsigned sourceLayerSize = sourceLayerPair.second; - - for ( unsigned j = 0; j < sourceLayerSize; ++j ) - { - double weight = layer->getWeight( sourceLayerPair.first, j, i ); - if ( !sourceLayer->neuronEliminated( j ) ) - { - Stringf sourceVariableName( "x%u", sourceLayer->neuronToVariable( j ) ); - terms.append( GurobiWrapper::Term( weight, sourceVariableName ) ); - } - else - { - bias -= weight * sourceLayer->getEliminatedNeuronValue( j ); - } - } - } - - gurobi.addEqConstraint( terms, bias ); - } - } -} - -void LPFormulator::addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables, - double coeff ) -{ - double slope = layer->getAlpha(); - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( !layer->neuronEliminated( i ) ) - { - unsigned targetVariable = layer->neuronToVariable( i ); - - List sources = layer->getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - unsigned sourceNeuron = sources.begin()->_neuron; - - if ( sourceLayer->neuronEliminated( sourceNeuron ) ) - { - // If the source neuron has been eliminated, this neuron is constant - double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - double targetValue = sourceValue > 0 ? sourceValue : 0; - - gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); - - continue; - } - - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - - String sourceName = Stringf( "x%u", sourceVariable ); - if ( createVariables && !gurobi.containsVariable( sourceName ) ) - gurobi.addVariable( sourceName, sourceLb, sourceUb ); - - gurobi.addVariable( - Stringf( "x%u", targetVariable ), layer->getLb( i ), layer->getUb( i ) ); - - if ( !FloatUtils::isNegative( sourceLb ) ) - { - // The LeakyReLU is active, y = x - - List terms; - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addEqConstraint( terms, 0 ); - } - else if ( !FloatUtils::isPositive( sourceUb ) ) - { - // The LeakyReLU is inactive, y = ((alpha+1) * coeff - alpha) * x - List terms; - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( slope - ( slope + 1 ) * coeff, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addEqConstraint( terms, 0 ); - } - else - { - double width = sourceUb - sourceLb; - double coeff = ( sourceUb - slope * sourceLb ) / width; - double bias = ( ( slope - 1 ) * sourceUb * sourceLb ) / width; - - /* - The phase of this LeakyReLU is not yet fixed. - For y = LeakyReLU(x), we add the following triangular relaxation: - 1. y >= ((alpha+1) * coeff - alpha) * x - 2. y >= x - 3. 
y is below the line the crosses (x.lb,0) and (x.ub,x.ub) - */ - - // y >= ((alpha+1) * coeff - alpha) * x - List terms; - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( slope - ( slope + 1 ) * coeff, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, 0 ); - - // y >= x, i.e. y - x >= 0 - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, 0 ); - - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -coeff, Stringf( "x%u", sourceVariable ) ) ); + terms.append( GurobiWrapper::Term( -weight, Stringf( "x%u", sourceVariable ) ) ); gurobi.addLeqConstraint( terms, bias ); } } diff --git a/src/nlr/LPFormulator.h b/src/nlr/LPFormulator.h index 07143618d8..4184839d9c 100644 --- a/src/nlr/LPFormulator.h +++ b/src/nlr/LPFormulator.h @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -53,7 +54,8 @@ class LPFormulator : public ParallelSolver constructed from scratch */ void optimizeBoundsWithLpRelaxation( const Map &layers, - bool backward = false ); + bool backward = false, + std::vector coeffs = {} ); void optimizeBoundsWithInvprop( const Map &layers ); void optimizeBoundsWithPreimageApproximation( const Map &layers ); void optimizeBoundsOfOneLayerWithLpRelaxation( const Map &layers, @@ -75,10 +77,12 @@ class LPFormulator : public ParallelSolver */ void createLPRelaxation( const Map &layers, GurobiWrapper &gurobi, - unsigned lastLayer = UINT_MAX ); + unsigned lastLayer = UINT_MAX, + std::vector coeffs = {} ); void createLPRelaxationAfter( const Map &layers, GurobiWrapper &gurobi, - unsigned firstLayer ); + unsigned firstLayer, + std::vector coeffs = {} ); double solveLPRelaxation( GurobiWrapper &gurobi, const Map &layers, MinOrMax minOrMax, @@ -126,7 +130,7 @@ class LPFormulator : public ParallelSolver const Layer *layer, bool createVariables ); - void optimizeBoundsOfNeuronsWithLpRlaxation( ThreadArgument &args, bool backward ); + void optimizeBoundsOfNeuronsWithLpRelaxation( ThreadArgument &args, bool backward, std::vector coeffs = {} ); // Create LP relaxations depending on an external parameter. 
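The triangular LeakyReLU relaxation shown above is easiest to sanity-check with concrete numbers. A minimal standalone sketch (hypothetical values, not part of the patch; variable names are illustrative):

    #include <cassert>
    #include <cmath>

    int main()
    {
        // LeakyReLU triangle for y = LeakyReLU( x ), alpha = 0.1, x in [ -2, 4 ]
        double alpha = 0.1, lb = -2, ub = 4;
        double width = ub - lb;                                 // 6
        double upperSlope = ( ub - alpha * lb ) / width;        // ( 4 + 0.2 ) / 6 = 0.7
        double upperBias = ( ( alpha - 1 ) * ub * lb ) / width; // ( -0.9 * -8 ) / 6 = 1.2

        // The upper facet y <= 0.7 x + 1.2 passes through the two corners
        // of the relaxation, ( lb, alpha * lb ) and ( ub, ub ):
        assert( std::abs( upperSlope * lb + upperBias - alpha * lb ) < 1e-9 );
        assert( std::abs( upperSlope * ub + upperBias - ub ) < 1e-9 );

        // The parameterised lower facet y >= ( ( alpha + 1 ) * coeff - alpha ) * x
        // sweeps from y >= -0.1 x at coeff = 0 to y >= x at coeff = 1.
        return 0;
    }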
@@ -135,9 +139,6 @@ class LPFormulator : public ParallelSolver bool createVariables, double coeff ); - void - addInputLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, double coeff ); - void addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); @@ -147,29 +148,9 @@ class LPFormulator : public ParallelSolver void addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); - void - addMaxLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); - - void - addRoundLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); - - void - addAbsoluteValueLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); - - void - addSigmoidLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); - - void - addSoftmaxLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); - void - addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); - - void - addWeightedSumLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables, - double coeff ); + // Estimate Volume of parameterised LP relaxations. + static double EstimateVolume( const Map &layers, std::vector coeffs ); /* diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index c7f728e860..6fdb081cba 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -2,7 +2,7 @@ /*! \file Layer.cpp ** \verbatim ** Top contributors (to current version): - ** Guy Katz, Ido Shmuel + ** Guy Katz ** This file is part of the Marabou project. ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS ** in the top-level source directory) and their institutional affiliations. @@ -1061,7 +1061,6 @@ void Layer::computeIntervalArithmeticBoundsForSigmoid() } } - void Layer::computeIntervalArithmeticBoundsForRound() { for ( unsigned i = 0; i < _size; ++i ) @@ -1094,7 +1093,6 @@ void Layer::computeIntervalArithmeticBoundsForRound() } } - void Layer::computeIntervalArithmeticBoundsForMax() { for ( unsigned i = 0; i < _size; ++i ) @@ -1685,8 +1683,6 @@ void Layer::computeSymbolicBoundsForSign() _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); - - std::cout << "sign, p-ll, ul, lu, uu: " << _symbolicLbOfLb[i] << " " << _symbolicUbOfLb[i] << " " << _symbolicLbOfUb[i] << " " << _symbolicUbOfUb[i] << " " << std::endl; // Has the b variable been fixed? 
if ( !FloatUtils::isNegative( sourceLb ) ) @@ -1728,7 +1724,6 @@ void Layer::computeSymbolicBoundsForSign() for ( unsigned j = 0; j < _inputLayerSize; ++j ) _symbolicUb[j * _size + i] *= factor; - // Do the same for the bias, and then adjust _symbolicUpperBias[i] *= factor; _symbolicUpperBias[i] += 1; @@ -1756,13 +1751,11 @@ void Layer::computeSymbolicBoundsForSign() for ( unsigned j = 0; j < _inputLayerSize; ++j ) { _symbolicLb[j * _size + i] *= factor; - std::cout << "sign, p-lb: " << _symbolicLb[j * _size + i] << std::endl; } // Do the same for the bias, and then adjust _symbolicLowerBias[i] *= factor; _symbolicLowerBias[i] -= 1; - std::cout << "sign, p-lB: " << _symbolicLowerBias[i] << std::endl; } if (upperSignPhase == PHASE_NOT_FIXED) @@ -2051,17 +2044,17 @@ void Layer::computeSymbolicBoundsForLeakyRelu() // Symbolic upper bound: x_f <= (x_b - l) * u / ( u - l) // Concrete upper bound: x_f <= ub_b double width = sourceUb - sourceLb; - double coeff = ( sourceUb - _alpha * sourceLb ) / width; + double weight = ( sourceUb - _alpha * sourceLb ) / width; if ( _alpha <= 1 ) { for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - _symbolicUb[j * _size + i] *= coeff; + _symbolicUb[j * _size + i] *= weight; } // Do the same for the bias, and then adjust - _symbolicUpperBias[i] *= coeff; + _symbolicUpperBias[i] *= weight; _symbolicUpperBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; @@ -2096,11 +2089,11 @@ void Layer::computeSymbolicBoundsForLeakyRelu() { for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - _symbolicLb[j * _size + i] *= coeff; + _symbolicLb[j * _size + i] *= weight; } // Do the same for the bias, and then adjust - _symbolicLowerBias[i] *= coeff; + _symbolicLowerBias[i] *= weight; _symbolicLowerBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; if ( sourceUb > sourceLb ) @@ -2213,7 +2206,6 @@ void Layer::computeSymbolicBoundsForLeakyRelu() } } - void Layer::computeSymbolicBoundsForSigmoid() { std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); @@ -3301,7 +3293,6 @@ void Layer::computeSymbolicBoundsForWeightedSum() } } - /* We now have the symbolic representation for the current layer. 
Next, we compute new lower and upper bounds for @@ -3371,178 +3362,831 @@ void Layer::computeSymbolicBoundsForWeightedSum() } } -double Layer::softmaxLSELowerBound( const Vector &inputs, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) +void Layer::computeParameterisedSymbolicBounds( double coeff ) { - double sum = 0; - for ( unsigned j = 0; j < inputs.size(); ++j ) + switch ( _type ) { - double lj = inputLbs[j]; - double uj = inputUbs[j]; - double xj = inputs[j]; - sum += - ( uj - xj ) / ( uj - lj ) * std::exp( lj ) + ( xj - lj ) / ( uj - lj ) * std::exp( uj ); - } + case RELU: + computeParameterisedSymbolicBoundsForRelu( coeff ); + break; - return std::exp( inputs[i] ) / sum; + case SIGN: + computeParameterisedSymbolicBoundsForSign( coeff ); + break; + + case LEAKY_RELU: + computeParameterisedSymbolicBoundsForLeakyRelu( coeff ); + break; + + default: + computeSymbolicBounds(); + break; + } } -double Layer::softmaxdLSELowerBound( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) +void Layer::computeParameterisedSymbolicBoundsForRelu( double coeff ) { - double val = 0; - if ( i == di ) - val += softmaxLSELowerBound( inputMids, inputLbs, inputUbs, i ); - - double ldi = inputLbs[di]; - double udi = inputUbs[di]; + ASSERT( coeff >= 0 && coeff <= 1 ); + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); + std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); - double sum = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) + for ( unsigned i = 0; i < _size; ++i ) { - double lj = inputLbs[j]; - double uj = inputUbs[j]; - double xj = inputMids[j]; + if ( _eliminatedNeurons.exists( i ) ) + { + _symbolicLowerBias[i] = _eliminatedNeurons[i]; + _symbolicUpperBias[i] = _eliminatedNeurons[i]; - sum += - ( uj - xj ) / ( uj - lj ) * std::exp( lj ) + ( xj - lj ) / ( uj - lj ) * std::exp( uj ); + _symbolicLbOfLb[i] = _eliminatedNeurons[i]; + _symbolicUbOfLb[i] = _eliminatedNeurons[i]; + _symbolicLbOfUb[i] = _eliminatedNeurons[i]; + _symbolicUbOfUb[i] = _eliminatedNeurons[i]; + } } - val -= std::exp( inputMids[i] ) / ( sum * sum ) * ( std::exp( udi ) - std::exp( ldi ) ) / - ( udi - ldi ); + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; - return val; -} + /* + There are two ways we can determine that a ReLU has become fixed: -double Layer::softmaxLSELowerBound2( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) -{ - double max = FloatUtils::negativeInfinity(); - unsigned maxInputIndex = 0; - unsigned index = 0; - for ( const auto &mid : inputMids ) - { - if ( mid > max ) - { - max = mid; - maxInputIndex = index; - } - ++index; - } + 1. If the ReLU's variable has been externally fixed + 2. lbLb >= 0 (ACTIVE) or ubUb <= 0 (INACTIVE) + */ + PhaseStatus reluPhase = PHASE_NOT_FIXED; - if ( maxInputIndex == i ) - return softmaxERLowerBound( inputMids, inputLbs, inputUbs, i ); - else - { - double sum = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) - { - if ( j == maxInputIndex ) - sum += 1; - else - { - double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; - double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; - double xjjstar = inputMids[j] - inputMids[maxInputIndex]; + // Has the f variable been eliminated or fixed? 
+ if ( FloatUtils::isPositive( _lb[i] ) ) + reluPhase = RELU_PHASE_ACTIVE; + else if ( FloatUtils::isZero( _ub[i] ) ) + reluPhase = RELU_PHASE_INACTIVE; - sum += ( ujjstar - xjjstar ) / ( ujjstar - ljjstar ) * std::exp( ljjstar ) + - ( xjjstar - ljjstar ) / ( ujjstar - ljjstar ) * std::exp( ujjstar ); - } - } + ASSERT( _neuronToActivationSources.exists( i ) ); + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); - return std::exp( inputMids[i] - inputMids[maxInputIndex] ) / sum; - } -} + /* + A ReLU initially "inherits" the symbolic bounds computed + for its input variable + */ + unsigned sourceLayerSize = sourceLayer->getSize(); + const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); + const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); -double Layer::softmaxdLSELowerBound2( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) -{ - double max = FloatUtils::negativeInfinity(); - unsigned maxInputIndex = 0; - unsigned index = 0; - for ( const auto &mid : inputMids ) - { - if ( mid > max ) + for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - max = mid; - maxInputIndex = index; + _symbolicLb[j * _size + i] = + sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron]; + _symbolicUb[j * _size + i] = + sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron]; } - ++index; - } + _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron]; + _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron]; - if ( maxInputIndex == i ) - return softmaxdERLowerBound( inputMids, inputLbs, inputUbs, i, di ); - else - { - double val = softmaxLSELowerBound2( inputMids, inputLbs, inputUbs, i ); + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); - double sum = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) + _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron ); + _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); + _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); + _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); + + // Has the b variable been fixed? 
+        if ( !FloatUtils::isNegative( sourceLb ) )
+        {
+            reluPhase = RELU_PHASE_ACTIVE;
+        }
+        else if ( !FloatUtils::isPositive( sourceUb ) )
+        {
+            reluPhase = RELU_PHASE_INACTIVE;
+        }
+
+        if ( reluPhase == PHASE_NOT_FIXED )
+        {
+            // If we got here, we know that lbLb < 0 and ubUb > 0.
+            // There are four possible cases, depending on whether
+            // ubLb and lbUb are negative or positive
+            // (see Neurify paper, page 14).
+
+            // Upper bound
+            if ( _symbolicLbOfUb[i] <= 0 )
+            {
+                // lbOfUb[i] < 0 < ubOfUb[i]
+                // Concretize the upper bound using the Ehlers-like approximation
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                    _symbolicUb[j * _size + i] = _symbolicUb[j * _size + i] * _symbolicUbOfUb[i] /
+                                                 ( _symbolicUbOfUb[i] - _symbolicLbOfUb[i] );
+
+                // Do the same for the bias, and then adjust
+                _symbolicUpperBias[i] = _symbolicUpperBias[i] * _symbolicUbOfUb[i] /
+                                        ( _symbolicUbOfUb[i] - _symbolicLbOfUb[i] );
+                _symbolicUpperBias[i] -= _symbolicLbOfUb[i] * _symbolicUbOfUb[i] /
+                                         ( _symbolicUbOfUb[i] - _symbolicLbOfUb[i] );
+            }
+
+            // Lower bound: y >= coeff * x (varies continuously between
+            // y >= 0 at coeff = 0 and y >= x at coeff = 1).
+            if ( _symbolicUbOfLb[i] <= 0 )
+            {
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                    _symbolicLb[j * _size + i] *= coeff;
+
+                _symbolicLowerBias[i] *= coeff;
+            }
+            else
+            {
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                    _symbolicLb[j * _size + i] = _symbolicLb[j * _size + i] * _symbolicUbOfLb[i] /
+                                                 ( _symbolicUbOfLb[i] - _symbolicLbOfLb[i] );
+
+                _symbolicLowerBias[i] = _symbolicLowerBias[i] * _symbolicUbOfLb[i] /
+                                        ( _symbolicUbOfLb[i] - _symbolicLbOfLb[i] );
+            }
+
+            _symbolicLbOfLb[i] = 0;
+        }
+        else
+        {
+            // The phase of this ReLU is fixed!
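+            // (Editorial sketch, not part of the original patch: in the
+            // PHASE_NOT_FIXED branch above, coeff interpolates linearly
+            // between the two classical DeepPoly choices for the lower
+            // bound: coeff = 0 collapses the inherited symbolic lower
+            // bound to y >= 0, while coeff = 1 keeps it unchanged.)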
+ if ( reluPhase == RELU_PHASE_ACTIVE ) + { + // Active ReLU, bounds are propagated as is + } + else + { + // Inactive ReLU, returns zero + _symbolicLbOfLb[i] = 0; + _symbolicUbOfLb[i] = 0; + _symbolicLbOfUb[i] = 0; + _symbolicUbOfUb[i] = 0; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] = 0; + _symbolicLb[j * _size + i] = 0; + } + + _symbolicLowerBias[i] = 0; + _symbolicUpperBias[i] = 0; + } + } + + if ( _symbolicLbOfUb[i] < 0 ) + _symbolicLbOfUb[i] = 0; + + /* + We now have the tightest bounds we can for the relu + variable. If they are tigheter than what was previously + known, store them. + */ + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } +} + +void Layer::computeParameterisedSymbolicBoundsForSign( double coeff ) +{ + ASSERT( coeff >= 0 && coeff <= 1 ); + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); + std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); + + for ( unsigned i = 0; i < _size; ++i ) + { + // Eliminate neurons are skipped + if ( _eliminatedNeurons.exists( i ) ) + { + _symbolicLowerBias[i] = _eliminatedNeurons[i]; + _symbolicUpperBias[i] = _eliminatedNeurons[i]; + + _symbolicLbOfLb[i] = _eliminatedNeurons[i]; + _symbolicUbOfLb[i] = _eliminatedNeurons[i]; + _symbolicLbOfUb[i] = _eliminatedNeurons[i]; + _symbolicUbOfUb[i] = _eliminatedNeurons[i]; + + continue; + } + + /* + There are two ways we can determine that a Sign has become fixed: + + 1. If the Sign's variable has been externally fixed + 2. lbLb >= 0 (Positive) or ubUb < 0 (Negative) + */ + PhaseStatus signPhase = PHASE_NOT_FIXED; + + // Has the f variable been eliminated or fixed? + if ( !FloatUtils::isNegative( _lb[i] ) ) + signPhase = SIGN_PHASE_POSITIVE; + else if ( FloatUtils::isNegative( _ub[i] ) ) + signPhase = SIGN_PHASE_NEGATIVE; + + ASSERT( _neuronToActivationSources.exists( i ) ); + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + + /* + A Sign initially "inherits" the symbolic bounds computed + for its input variable + */ + unsigned sourceLayerSize = sourceLayer->getSize(); + const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); + const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] = + sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron]; + _symbolicUb[j * _size + i] = + sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron]; + } + _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron]; + _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron]; + + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); + + _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron ); + _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); + _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); + _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); + + // Has the b variable been fixed? 
+        if ( !FloatUtils::isNegative( sourceLb ) )
+        {
+            signPhase = SIGN_PHASE_POSITIVE;
+        }
+        else if ( FloatUtils::isNegative( sourceUb ) )
+        {
+            signPhase = SIGN_PHASE_NEGATIVE;
+        }
+
+        if ( signPhase == PHASE_NOT_FIXED )
+        {
+            PhaseStatus upperSignPhase = PHASE_NOT_FIXED;
+            PhaseStatus lowerSignPhase = PHASE_NOT_FIXED;
+
+            // If we got here, we know that lbLb < 0 and ubUb > 0
+
+            // Upper bound
+            if ( !FloatUtils::isNegative( _symbolicLbOfUb[i] ) )
+            {
+                // The upper bound is strictly positive - turns into
+                // the constant 1
+
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                    _symbolicUb[j * _size + i] = 0;
+
+                _symbolicUpperBias[i] = 1;
+
+                upperSignPhase = SIGN_PHASE_POSITIVE;
+            }
+            else
+            {
+                // The upper bound's phase is not fixed, use the parameterised
+                // parallelogram approximation: y <= -2 / l * ( 1 - coeff ) * x + 1
+                // (varies continuously between y <= 1 and y <= -2 / l * x + 1).
+                double factor = -2.0 / _symbolicLbOfLb[i] * ( 1 - coeff );
+
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                    _symbolicUb[j * _size + i] *= factor;
+
+                // Do the same for the bias, and then adjust
+                _symbolicUpperBias[i] *= factor;
+                _symbolicUpperBias[i] += 1;
+            }
+
+            // Lower bound
+            if ( FloatUtils::isNegative( _symbolicUbOfLb[i] ) )
+            {
+                // The lower bound is strictly negative - turns into
+                // the constant -1
+
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                    _symbolicLb[j * _size + i] = 0;
+
+                _symbolicLowerBias[i] = -1;
+
+                lowerSignPhase = SIGN_PHASE_NEGATIVE;
+            }
+            else
+            {
+                // The lower bound's phase is not fixed, use the parameterised
+                // parallelogram approximation: y >= 2 / u * ( 1 - coeff ) * x - 1
+                // (varies continuously between y >= -1 and y >= 2 / u * x - 1).
+                double factor = 2.0 / _symbolicUbOfUb[i] * ( 1 - coeff );
+
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                {
+                    _symbolicLb[j * _size + i] *= factor;
+                }
+
+                // Do the same for the bias, and then adjust
+                _symbolicLowerBias[i] *= factor;
+                _symbolicLowerBias[i] -= 1;
+            }
+
+            if ( upperSignPhase == PHASE_NOT_FIXED )
+            {
+                _symbolicUbOfUb[i] = 1;
+                _symbolicLbOfUb[i] = -1;
+            }
+            else
+            {
+                _symbolicUbOfUb[i] = 1;
+                _symbolicLbOfUb[i] = 1;
+            }
+
+            if ( lowerSignPhase == PHASE_NOT_FIXED )
+            {
+                _symbolicUbOfLb[i] = 1;
+                _symbolicLbOfLb[i] = -1;
+            }
+            else
+            {
+                _symbolicUbOfLb[i] = -1;
+                _symbolicLbOfLb[i] = -1;
+            }
+        }
+        else
+        {
+            // The phase of this Sign is fixed!
+            double constant = ( signPhase == SIGN_PHASE_POSITIVE ) ? 1 : -1;
+
+            _symbolicLbOfLb[i] = constant;
+            _symbolicUbOfLb[i] = constant;
+            _symbolicLbOfUb[i] = constant;
+            _symbolicUbOfUb[i] = constant;
+
+            for ( unsigned j = 0; j < _inputLayerSize; ++j )
+            {
+                _symbolicUb[j * _size + i] = 0;
+                _symbolicLb[j * _size + i] = 0;
+            }
+
+            _symbolicLowerBias[i] = constant;
+            _symbolicUpperBias[i] = constant;
+        }
+
+        if ( _symbolicLbOfLb[i] < -1 )
+            _symbolicLbOfLb[i] = -1;
+        if ( _symbolicUbOfUb[i] > 1 )
+            _symbolicUbOfUb[i] = 1;
+
+        /*
+          We now have the tightest bounds we can for the sign
+          variable. If they are tighter than what was previously
+          known, store them.
+ */ + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } +} + +void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( double coeff ) +{ + ASSERT( _alpha > 0 && _alpha < 1 ); + ASSERT( coeff >= 0 && coeff <= 1 ); + + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); + std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + { + _symbolicLowerBias[i] = _eliminatedNeurons[i]; + _symbolicUpperBias[i] = _eliminatedNeurons[i]; + + _symbolicLbOfLb[i] = _eliminatedNeurons[i]; + _symbolicUbOfLb[i] = _eliminatedNeurons[i]; + _symbolicLbOfUb[i] = _eliminatedNeurons[i]; + _symbolicUbOfUb[i] = _eliminatedNeurons[i]; + } + } + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + /* + There are two ways we can determine that a LeakyReLU has become fixed: + + 1. If the LeakyReLU's variable has been externally fixed + 2. lbLb >= 0 (ACTIVE) or ubUb <= 0 (INACTIVE) + */ + PhaseStatus leakyReluPhase = PHASE_NOT_FIXED; + + // Has the f variable been eliminated or fixed? + if ( FloatUtils::isPositive( _lb[i] ) ) + leakyReluPhase = RELU_PHASE_ACTIVE; + else if ( FloatUtils::isZero( _ub[i] ) ) + leakyReluPhase = RELU_PHASE_INACTIVE; + + ASSERT( _neuronToActivationSources.exists( i ) ); + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + + /* + A LeakyReLU initially "inherits" the symbolic bounds computed + for its input variable + */ + unsigned sourceLayerSize = sourceLayer->getSize(); + const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); + const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] = + sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron]; + _symbolicUb[j * _size + i] = + sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron]; + } + _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron]; + _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron]; + + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); + + _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron ); + _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); + _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); + _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); + + // Has the b variable been fixed? 
+ if ( !FloatUtils::isNegative( sourceLb ) ) + { + leakyReluPhase = RELU_PHASE_ACTIVE; + } + else if ( !FloatUtils::isPositive( sourceUb ) ) + { + leakyReluPhase = RELU_PHASE_INACTIVE; + } + + if ( leakyReluPhase == PHASE_NOT_FIXED ) + { + // LeakyReLU not fixed + // Symbolic upper bound: x_f <= (x_b - l) * u / ( u - l) + // Concrete upper bound: x_f <= ub_b + double width = sourceUb - sourceLb; + double weight = ( sourceUb - _alpha * sourceLb ) / width; + + if ( _alpha <= 1 ) + { + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] *= weight; + } + + // Do the same for the bias, and then adjust + _symbolicUpperBias[i] *= weight; + _symbolicUpperBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; + + + // For the lower bound, in general, x_f >= lambda * x_b, where + // 0 <= lambda <= 1, would be a sound lower bound. We + // use the heuristic described in section 4.1 of + // https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + // to set the value of lambda (either 0 or 1 is considered). + if ( sourceUb > sourceLb ) + { + // lambda = 1 + // Symbolic lower bound: x_f >= x_b + // Concrete lower bound: x_f >= sourceLb + + // Lower bounds are passed as is + } + else + { + // lambda = ((1 - alpha) * coeff + alpha) (varies continuously between lambda = alpha and lambda = 1). + // Symbolic lower bound: x_f >= ((1 - coeff) * weight + alpha) x_b + // Concrete lower bound: x_f >= 0 + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] *= ( 1 - _alpha ) * coeff + _alpha; + } + + _symbolicLowerBias[i] *= _alpha; + } + } + else + { + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] *= weight; + } + + // Do the same for the bias, and then adjust + _symbolicLowerBias[i] *= weight; + _symbolicLowerBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; + + if ( sourceUb > sourceLb ) + { + // Upper bounds are passed as is + } + else + { + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] *= _alpha; + } + + _symbolicUpperBias[i] *= _alpha; + } + } + + /* + We now have the symbolic representation for the current + layer. Next, we compute new lower and upper bounds for + it. For each of these bounds, we compute an upper bound and + a lower bound. + */ + _symbolicLbOfLb[i] = _symbolicLowerBias[i]; + _symbolicUbOfLb[i] = _symbolicLowerBias[i]; + _symbolicLbOfUb[i] = _symbolicUpperBias[i]; + _symbolicUbOfUb[i] = _symbolicUpperBias[i]; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + double inputLb = _layerOwner->getLayer( 0 )->getLb( j ); + double inputUb = _layerOwner->getLayer( 0 )->getUb( j ); + + double entry = _symbolicLb[j * _size + i]; + + if ( entry >= 0 ) + { + _symbolicLbOfLb[i] += ( entry * inputLb ); + _symbolicUbOfLb[i] += ( entry * inputUb ); + } + else + { + _symbolicLbOfLb[i] += ( entry * inputUb ); + _symbolicUbOfLb[i] += ( entry * inputLb ); + } + + entry = _symbolicUb[j * _size + i]; + + if ( entry >= 0 ) + { + _symbolicLbOfUb[i] += ( entry * inputLb ); + _symbolicUbOfUb[i] += ( entry * inputUb ); + } + else + { + _symbolicLbOfUb[i] += ( entry * inputUb ); + _symbolicUbOfUb[i] += ( entry * inputLb ); + } + } + } + else + { + // The phase of this LeakyReLU is fixed! 
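+            // (Editorial sketch, not part of the original patch: in the unfixed
+            // branch above, lambda = ( 1 - alpha ) * coeff + alpha slides the
+            // lower bound's slope between alpha at coeff = 0 and 1 at
+            // coeff = 1; e.g. with alpha = 0.1 and coeff = 0.5 the slope
+            // is 0.55.)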
+ if ( leakyReluPhase == RELU_PHASE_ACTIVE ) + { + // Positive LeakyReLU, bounds are propagated as is + } + else + { + // Negative LeakyReLU, bounds are multiplied by _alpha + _symbolicLbOfLb[i] *= _alpha; + _symbolicUbOfLb[i] *= _alpha; + _symbolicLbOfUb[i] *= _alpha; + _symbolicUbOfUb[i] *= _alpha; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] *= _alpha; + _symbolicLb[j * _size + i] *= _alpha; + } + + _symbolicLowerBias[i] *= _alpha; + _symbolicUpperBias[i] *= _alpha; + } + } + + if ( _symbolicUbOfUb[i] > sourceUb ) + _symbolicUbOfUb[i] = sourceUb; + if ( _symbolicLbOfLb[i] < _alpha * sourceLb ) + _symbolicLbOfLb[i] = _alpha * sourceLb; + + /* + We now have the tightest bounds we can for the leakyRelu + variable. If they are tigheter than what was previously + known, store them. + */ + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } +} + +double Layer::softmaxLSELowerBound( const Vector &inputs, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) +{ + double sum = 0; + for ( unsigned j = 0; j < inputs.size(); ++j ) + { + double lj = inputLbs[j]; + double uj = inputUbs[j]; + double xj = inputs[j]; + sum += + ( uj - xj ) / ( uj - lj ) * std::exp( lj ) + ( xj - lj ) / ( uj - lj ) * std::exp( uj ); + } + + return std::exp( inputs[i] ) / sum; +} + +double Layer::softmaxdLSELowerBound( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) +{ + double val = 0; + if ( i == di ) + val += softmaxLSELowerBound( inputMids, inputLbs, inputUbs, i ); + + double ldi = inputLbs[di]; + double udi = inputUbs[di]; + + double sum = 0; + for ( unsigned j = 0; j < inputMids.size(); ++j ) + { + double lj = inputLbs[j]; + double uj = inputUbs[j]; + double xj = inputMids[j]; + + sum += + ( uj - xj ) / ( uj - lj ) * std::exp( lj ) + ( xj - lj ) / ( uj - lj ) * std::exp( uj ); + } + + val -= std::exp( inputMids[i] ) / ( sum * sum ) * ( std::exp( udi ) - std::exp( ldi ) ) / + ( udi - ldi ); + + return val; +} + +double Layer::softmaxLSELowerBound2( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) +{ + double max = FloatUtils::negativeInfinity(); + unsigned maxInputIndex = 0; + unsigned index = 0; + for ( const auto &mid : inputMids ) + { + if ( mid > max ) + { + max = mid; + maxInputIndex = index; + } + ++index; + } + + if ( maxInputIndex == i ) + return softmaxERLowerBound( inputMids, inputLbs, inputUbs, i ); + else + { + double sum = 0; + for ( unsigned j = 0; j < inputMids.size(); ++j ) + { + if ( j == maxInputIndex ) + sum += 1; + else + { + double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; + double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; + double xjjstar = inputMids[j] - inputMids[maxInputIndex]; + + sum += ( ujjstar - xjjstar ) / ( ujjstar - ljjstar ) * std::exp( ljjstar ) + + ( xjjstar - ljjstar ) / ( ujjstar - ljjstar ) * std::exp( ujjstar ); + } + } + + return std::exp( inputMids[i] - inputMids[maxInputIndex] ) / sum; + } +} + +double Layer::softmaxdLSELowerBound2( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) +{ + double max = FloatUtils::negativeInfinity(); + unsigned 
maxInputIndex = 0; + unsigned index = 0; + for ( const auto &mid : inputMids ) + { + if ( mid > max ) + { + max = mid; + maxInputIndex = index; + } + ++index; + } + + if ( maxInputIndex == i ) + return softmaxdERLowerBound( inputMids, inputLbs, inputUbs, i, di ); + else + { + double val = softmaxLSELowerBound2( inputMids, inputLbs, inputUbs, i ); + + double sum = 0; + for ( unsigned j = 0; j < inputMids.size(); ++j ) + { + if ( j == maxInputIndex ) + sum += 1; + else + { + double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; + double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; + double xjjstar = inputMids[j] - inputMids[maxInputIndex]; + sum += ( ujjstar - xjjstar ) / ( ujjstar - ljjstar ) * std::exp( ljjstar ) + + ( xjjstar - ljjstar ) / ( ujjstar - ljjstar ) * std::exp( ujjstar ); + } + } + double val2 = std::exp( inputMids[i] - inputMids[maxInputIndex] ) / ( sum * sum ); + + if ( i == di ) + { + double ldijstar = inputLbs[i] - inputUbs[maxInputIndex]; + double udijstar = inputUbs[i] - inputLbs[maxInputIndex]; + return val - + val2 * ( std::exp( udijstar ) - std::exp( ldijstar ) ) / ( udijstar - ldijstar ); + } + else if ( maxInputIndex == di ) + { + double sum2 = 0; + for ( unsigned j = 0; j < inputMids.size(); ++j ) + { + if ( j == maxInputIndex ) + continue; + else + { + double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; + double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; + sum2 += ( std::exp( ujjstar ) - std::exp( ljjstar ) ) / ( ujjstar - ljjstar ); + } + } + return -val + val2 * sum2; + } + else + { + double ldijstar = inputLbs[di] - inputUbs[maxInputIndex]; + double udijstar = inputUbs[di] - inputLbs[maxInputIndex]; + return -val2 * ( std::exp( udijstar ) - std::exp( ldijstar ) ) / + ( udijstar - ldijstar ); + } + } +} + +double Layer::softmaxLSEUpperBound( const Vector &inputs, + const Vector &outputLb, + const Vector &outputUb, + unsigned i ) +{ + double li = outputLb[i]; + double ui = outputUb[i]; + Vector inputTilda; SoftmaxConstraint::xTilda( inputs, inputs[i], inputTilda ); @@ -3650,7 +4294,6 @@ double Layer::softmaxdERUpperBound( const Vector &inputMids, double li = outputLb[i]; double ui = outputUb[i]; - if ( i == di ) { double val2 = -1; @@ -3980,7 +4623,6 @@ String Layer::typeToString( Type type ) return "BILINEAR"; break; - default: return "UNKNOWN TYPE"; break; diff --git a/src/nlr/Layer.h b/src/nlr/Layer.h index 4d1990ebc5..965f85ef45 100644 --- a/src/nlr/Layer.h +++ b/src/nlr/Layer.h @@ -2,7 +2,7 @@ /*! \file Layer.h ** \verbatim ** Top contributors (to current version): - ** Guy Katz, Ido Shmuel + ** Guy Katz ** This file is part of the Marabou project. ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS ** in the top-level source directory) and their institutional affiliations. 
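The LSE lower bound re-added above rests on the chord (secant) over-approximation of the convex function exp on an interval [ l, u ]. A minimal standalone check of that inequality and of the resulting softmax lower bound, with hypothetical numbers (not part of the patch; the chord helper mirrors the expression inside softmaxLSELowerBound):

    #include <cassert>
    #include <cmath>

    // chord( x ) >= exp( x ) on [ l, u ], because exp is convex
    static double chord( double x, double l, double u )
    {
        return ( ( u - x ) * std::exp( l ) + ( x - l ) * std::exp( u ) ) / ( u - l );
    }

    int main()
    {
        double x0 = 0.3, x1 = -0.5, l = -1, u = 1;
        // Over-approximating every exp( x_j ) in the denominator
        // under-approximates softmax_0( x ):
        double bound = std::exp( x0 ) / ( chord( x0, l, u ) + chord( x1, l, u ) );
        double exact = std::exp( x0 ) / ( std::exp( x0 ) + std::exp( x1 ) );
        assert( bound <= exact ); // roughly 0.47 <= 0.69
        return 0;
    }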
@@ -137,6 +137,7 @@ class Layer void obtainCurrentBounds( const Query &inputQuery ); void obtainCurrentBounds(); void computeSymbolicBounds(); + void computeParameterisedSymbolicBounds( double coeff ); void computeIntervalArithmeticBounds(); /* @@ -291,6 +292,14 @@ class Layer void computeSymbolicBoundsForSoftmax(); void computeSymbolicBoundsForBilinear(); void computeSymbolicBoundsDefault(); + + + /* + Helper functions for parameterised symbolic bound tightening + */ + void computeParameterisedSymbolicBoundsForRelu( double coeff ); + void computeParameterisedSymbolicBoundsForSign( double coeff ); + void computeParameterisedSymbolicBoundsForLeakyRelu( double coeff ); /* Helper functions for interval bound tightening diff --git a/src/nlr/NetworkLevelReasoner.cpp b/src/nlr/NetworkLevelReasoner.cpp index 04b9b20683..d41ed22de4 100644 --- a/src/nlr/NetworkLevelReasoner.cpp +++ b/src/nlr/NetworkLevelReasoner.cpp @@ -220,6 +220,12 @@ void NetworkLevelReasoner::symbolicBoundPropagation() _layerIndexToLayer[i]->computeSymbolicBounds(); } +void NetworkLevelReasoner::parameterisedSymbolicBoundPropagation( Map coeffs ) +{ + for ( unsigned i = 0; i < _layerIndexToLayer.size(); ++i ) + _layerIndexToLayer[i]->computeParameterisedSymbolicBounds( coeffs[i] ); +} + void NetworkLevelReasoner::deepPolyPropagation() { if ( _deepPolyAnalysis == nullptr ) @@ -237,6 +243,12 @@ void NetworkLevelReasoner::lpRelaxationPropagation() Options::get()->getMILPSolverBoundTighteningType() == MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE ) lpFormulator.optimizeBoundsWithLpRelaxation( _layerIndexToLayer, true ); + else if ( Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP ) + lpFormulator.optimizeBoundsWithInvprop( _layerIndexToLayer ); + else if ( Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX ) + lpFormulator.optimizeBoundsWithPreimageApproximation( _layerIndexToLayer ); else if ( Options::get()->getMILPSolverBoundTighteningType() == MILPSolverBoundTighteningType::LP_RELAXATION ) lpFormulator.optimizeBoundsWithLpRelaxation( _layerIndexToLayer ); diff --git a/src/nlr/NetworkLevelReasoner.h b/src/nlr/NetworkLevelReasoner.h index fa125f0116..f84f0f12ea 100644 --- a/src/nlr/NetworkLevelReasoner.h +++ b/src/nlr/NetworkLevelReasoner.h @@ -24,9 +24,9 @@ #include "MatrixMultiplication.h" #include "NeuronIndex.h" #include "PiecewiseLinearFunctionType.h" +#include "PolygonalTightening.h" #include "Tightening.h" #include "Vector.h" -#include "PolygonalTightening.h" #include @@ -103,6 +103,12 @@ class NetworkLevelReasoner : public LayerOwner expressions and obtain tighter bounds (e.g., if the upper bound on the upper bound of a ReLU node is negative, that ReLU is inactive and its output can be set to 0. + + - Parametrised Symbolic: For certain activation functions, there + is a continuum of valid symbolic bounds. We receive a map of + coefficients in range [0, 1] for every layer index, then compute + the parameterised symbolic bounds (or default to regular + symbolic bounds if parameterised bounds not implemented). 
- LP Relaxation: invoking an LP solver on a series of LP relaxations of the problem we're trying to solve, and @@ -131,6 +137,7 @@ class NetworkLevelReasoner : public LayerOwner void obtainCurrentBounds(); void intervalArithmeticBoundPropagation(); void symbolicBoundPropagation(); + void parameterisedSymbolicBoundPropagation( Map coeffs ); void deepPolyPropagation(); void lpRelaxationPropagation(); void LPTighteningForOneLayer( unsigned targetIndex ); From cd5f2dec2c2a1a7203fcae9e30a1bb443c58fd3b Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 10 Oct 2024 20:55:21 +0300 Subject: [PATCH 19/81] 0-6-6: New constants for PreimageApproximation (random seeds). 0-6-6: New constants for PreimageApproximation (random seeds). --- src/configuration/GlobalConfiguration.cpp | 2 ++ src/configuration/GlobalConfiguration.h | 6 ++++++ 2 files changed, 8 insertions(+) diff --git a/src/configuration/GlobalConfiguration.cpp b/src/configuration/GlobalConfiguration.cpp index d8529ee7c1..6cb1f6d8de 100644 --- a/src/configuration/GlobalConfiguration.cpp +++ b/src/configuration/GlobalConfiguration.cpp @@ -67,6 +67,8 @@ const unsigned GlobalConfiguration::ROW_BOUND_TIGHTENER_SATURATION_ITERATIONS = const double GlobalConfiguration::COST_FUNCTION_ERROR_THRESHOLD = 0.0000000001; const unsigned GlobalConfiguration::SIMULATION_RANDOM_SEED = 1; +const unsigned GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED = 1; +const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED = 1; const bool GlobalConfiguration::USE_HARRIS_RATIO_TEST = true; diff --git a/src/configuration/GlobalConfiguration.h b/src/configuration/GlobalConfiguration.h index ca06a28735..e0daaf133f 100644 --- a/src/configuration/GlobalConfiguration.h +++ b/src/configuration/GlobalConfiguration.h @@ -150,6 +150,12 @@ class GlobalConfiguration // Random seed for generating simulation values. static const unsigned SIMULATION_RANDOM_SEED; + + // Random seed for EstimateVolume procedure (PreimageApproximation). + static const unsigned VOLUME_ESTIMATION_RANDOM_SEED; + + // Random seed for PreimageApproximation optimization. + static const unsigned PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED; // How often should projected steepest edge reset the reference space? static const unsigned PSE_ITERATIONS_BEFORE_RESET; From d86c0a3d1ae2c65c2e1a0d3ff407ab2cda63e03f Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 10 Oct 2024 20:57:24 +0300 Subject: [PATCH 20/81] 0-6-7: TEMPORARY CHANGE - use boost v1.85 for Differential Evolution algorithm (gradient-free optimization for PreimageApproximation). 0-6-7: TEMPORARY CHANGE - use boost v1.85 for Differential Evolution algorithm (gradient-free optimization for PreimageApproximation). --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 8db91b2abb..f0d8e1a8e1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -82,7 +82,7 @@ include_directories(SYSTEM ${CVC4_DIR} ${CVC4_DIR}/include) # Avoid using deprecated operations add_definitions(-DBOOST_NO_CXX98_FUNCTION_BASE) -set(BOOST_VERSION 1.84.0) +set(BOOST_VERSION 1.85.0) set(BOOST_DIR "${TOOLS_DIR}/boost-${BOOST_VERSION}") if (MSVC) set(BOOST_ROOT "${BOOST_DIR}/win_installed") From 4467e09cf655f448e9410717edcff365a61eac1b Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Mon, 14 Oct 2024 12:14:36 +0300 Subject: [PATCH 21/81] 0-6-8: Revert last change. 0-6-8: Revert last change. 
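Stepping back to the parameterisedSymbolicBoundPropagation entry point added in the NetworkLevelReasoner diff above, a plausible caller-side sketch follows. It is hypothetical and not part of the patch: it assumes Map<unsigned, double> as the coefficient map type (matching the "one coefficient per layer index" description and the double taken by Layer::computeParameterisedSymbolicBounds) and uses the existing getNumberOfLayers() accessor.

    NLR::NetworkLevelReasoner nlr;
    // ... layers, weights, activation sources and initial bounds set up as usual ...

    // One coefficient in [ 0, 1 ] per layer index; layer types without a
    // parameterised relaxation fall back to computeSymbolicBounds().
    Map<unsigned, double> coeffs;
    for ( unsigned i = 0; i < nlr.getNumberOfLayers(); ++i )
        coeffs[i] = 0.5;

    nlr.parameterisedSymbolicBoundPropagation( coeffs );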
--- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index f0d8e1a8e1..8db91b2abb 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -82,7 +82,7 @@ include_directories(SYSTEM ${CVC4_DIR} ${CVC4_DIR}/include) # Avoid using deprecated operations add_definitions(-DBOOST_NO_CXX98_FUNCTION_BASE) -set(BOOST_VERSION 1.85.0) +set(BOOST_VERSION 1.84.0) set(BOOST_DIR "${TOOLS_DIR}/boost-${BOOST_VERSION}") if (MSVC) set(BOOST_ROOT "${BOOST_DIR}/win_installed") From 88e339a587940d40c8710408a15d7211a2d668e7 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Mon, 14 Oct 2024 12:17:18 +0300 Subject: [PATCH 22/81] 0-6-9: Adapt DifferentialEvolution from Boost v1.85. 0-6-9: Adapt DifferentialEvolution from Boost v1.85. --- src/nlr/DifferentialEvolution.h | 528 ++++++++++++++++++++++++++++++++ 1 file changed, 528 insertions(+) create mode 100644 src/nlr/DifferentialEvolution.h diff --git a/src/nlr/DifferentialEvolution.h b/src/nlr/DifferentialEvolution.h new file mode 100644 index 0000000000..030f338a84 --- /dev/null +++ b/src/nlr/DifferentialEvolution.h @@ -0,0 +1,528 @@ +/* + * Copyright Nick Thompson, 2024 + * Use, modification and distribution are subject to the + * Boost Software License, Version 1.0. (See accompanying file + * LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) + */ +#ifndef DIFFERENTIAL_EVOLUTION +#define DIFFERENTIAL_EVOLUTION + +#include "FloatUtils.h" + +#include // for std::sort +#include +#include +#include +#include +#include +#include +#include +#include +#include // for std::false_type +#include +#include + +namespace NLR { + + template struct has_resize : std::false_type {}; + + template struct has_resize().resize( size_t{} ) )>> : std::true_type {}; + + template constexpr bool has_resize_v = has_resize::value; + + template + void validate_bounds( ArgumentContainer const &lower_bounds, + ArgumentContainer const &upper_bounds ) + { + std::ostringstream oss; + + if ( lower_bounds.size() == 0 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The dimension of the problem cannot be zero."; + throw std::domain_error( oss.str() ); + } + + if ( upper_bounds.size() != lower_bounds.size() ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": There must be the same number of lower bounds as upper bounds, but given "; + oss << upper_bounds.size() << " upper bounds, and " << lower_bounds.size() << " lower bounds."; + throw std::domain_error( oss.str() ); + } + + for ( size_t i = 0; i < lower_bounds.size(); ++i ) + { + auto lb = lower_bounds[i]; + auto ub = upper_bounds[i]; + if ( lb > ub ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The upper bound must be greater than or equal to the lower bound, but the upper bound is " << ub + << " and the lower is " << lb << "."; + throw std::domain_error( oss.str() ); + } + + if ( !FloatUtils::isFinite( lb ) ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The lower bound must be finite, but got " << lb << "."; + oss << " For infinite bounds, emulate with std::numeric_limits::lower() or use a standard infinite->finite " + "transform."; + throw std::domain_error( oss.str() ); + } + + if ( !FloatUtils::isFinite( ub ) ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The upper bound must be finite, but got " << ub << "."; + oss << " For infinite bounds, emulate with std::numeric_limits::max() or use a standard infinite->finite " + "transform."; + throw 
std::domain_error( oss.str() ); + } + } + } + + template + std::vector random_initial_population( ArgumentContainer const &lower_bounds, + ArgumentContainer const &upper_bounds, + size_t initial_population_size, URBG &&gen ) + { + using Real = typename ArgumentContainer::value_type; + using DimensionlessReal = decltype( Real() / Real() ); + constexpr bool has_resize = has_resize_v; + std::vector population( initial_population_size ); + auto const dimension = lower_bounds.size(); + + for ( size_t i = 0; i < population.size(); ++i ) + { + if constexpr ( has_resize ) + population[i].resize( dimension ); + else + { + // Argument type must be known at compile-time; like std::array: + if ( population[i].size() != dimension ) + { + std::ostringstream oss; + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": For containers which do not have resize, the default size must be the same as the dimension, "; + oss << "but the default container size is " << population[i].size() << " and the dimension of the problem is " + << dimension << "."; + oss << " The function argument container type is " << typeid( ArgumentContainer ).name() << ".\n"; + throw std::runtime_error( oss.str() ); + } + } + } + + // Why don't we provide an option to initialize with (say) a Gaussian distribution? + // > If the optimum's location is fairly well known, + // > a Gaussian distribution may prove somewhat faster, although it + // > may also increase the probability that the population will converge prematurely. + // > In general, uniform distributions are preferred, since they best reflect + // > the lack of knowledge about the optimum's location. + // - Differential Evolution: A Practical Approach to Global Optimization + // That said, scipy uses Latin Hypercube sampling and says self-avoiding sequences are preferable. + // So this is something that could be investigated and potentially improved. 
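+        // (Editorial note, not part of the original patch: Latin hypercube
+        // sampling stratifies each coordinate into one bin per sample and
+        // draws exactly one point per bin, giving more even coverage than
+        // the independent uniform draws used below.)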
+ std::uniform_real_distribution dis( DimensionlessReal( 0 ), DimensionlessReal( 1 ) ); + for ( size_t i = 0; i < population.size(); ++i ) + { + for ( size_t j = 0; j < dimension; ++j ) + { + auto const &lb = lower_bounds[j]; + auto const &ub = upper_bounds[j]; + population[i][j] = lb + dis( gen ) * ( ub - lb ); + } + } + + return population; + } + + template + void validate_initial_guess( ArgumentContainer const &initial_guess, + ArgumentContainer const &lower_bounds, + ArgumentContainer const &upper_bounds) + { + std::ostringstream oss; + auto const dimension = lower_bounds.size(); + + if ( initial_guess.size() != dimension ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The initial guess must have the same dimensions as the problem,"; + oss << ", but the problem size is " << dimension << " and the initial guess has " << initial_guess.size() + << " elements."; + throw std::domain_error( oss.str() ); + } + + for ( size_t i = 0; i < dimension; ++i ) + { + auto lb = lower_bounds[i]; + auto ub = upper_bounds[i]; + + if ( !FloatUtils::isFinite( initial_guess[i] ) ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": At index " << i << ", the initial guess is " << initial_guess[i] + << ", make sure all elements of the initial guess are finite."; + throw std::domain_error( oss.str() ); + } + + if ( initial_guess[i] < lb || initial_guess[i] > ub ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": At index " << i << " the initial guess " << initial_guess[i] << " is not in the bounds [" << lb << ", " + << ub << "]."; + throw std::domain_error( oss.str() ); + } + } + } + + // Return indices corresponding to the minimum function values. + template + std::vector best_indices( std::vector const &function_values ) + { + const size_t n = function_values.size(); + std::vector indices( n ); + + for ( size_t i = 0; i < n; ++i ) + { + indices[i] = i; + } + + std::sort( indices.begin(), + indices.end(), + [&]( size_t a, size_t b ) + { + if ( FloatUtils::isNan( function_values[a] ) ) + { + return false; + } + if ( FloatUtils::isNan( function_values[b] ) ) + { + return true; + } + return function_values[a] < function_values[b]; + }); + + return indices; + } + + template + auto weighted_lehmer_mean( RandomAccessContainer const & values, + RandomAccessContainer const & weights) + { + if ( values.size() != weights.size() ) + { + std::ostringstream oss; + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": There must be the same number of weights as values, but got " << values.size() + << " values and " << weights.size() << " weights."; + throw std::logic_error( oss.str() ); + } + + if ( values.size() == 0 ) + { + std::ostringstream oss; + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": There must at least one value provided."; + throw std::logic_error( oss.str() ); + } + + using Real = typename RandomAccessContainer::value_type; + Real numerator = 0; + Real denominator = 0; + + for ( size_t i = 0; i < values.size(); ++i ) + { + if ( weights[i] < 0 || !FloatUtils::isFinite( weights[i] ) ) + { + std::ostringstream oss; + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": All weights must be positive and finite, but got received weight " << weights[i] + << " at index " << i << " of " << weights.size() << "."; + throw std::domain_error( oss.str() ); + } + + Real tmp = weights[i] * values[i]; + numerator += tmp * values[i]; + denominator += tmp; + } + return numerator / denominator; + } + + // 
Storn, R., Price, K. (1997). Differential evolution - a simple and efficient heuristic for global optimization over
+    // continuous spaces.
+    // Journal of Global Optimization, 11, 341-359.
+    // See:
+    // https://www.cp.eng.chula.ac.th/~prabhas//teaching/ec/ec2012/storn_price_de.pdf
+
+    // We provide the parameters in a struct; there are too many of them and they are too unwieldy to pass individually:
+    template <typename ArgumentContainer> struct differential_evolution_parameters
+    {
+        using Real = typename ArgumentContainer::value_type;
+        using DimensionlessReal = decltype( Real() / Real() );
+        ArgumentContainer lower_bounds;
+        ArgumentContainer upper_bounds;
+
+        // The mutation factor is also called the scale factor, or just F, in the literature:
+        DimensionlessReal mutation_factor = static_cast<DimensionlessReal>( 0.65 );
+        DimensionlessReal crossover_probability = static_cast<DimensionlessReal>( 0.5 );
+
+        // Population in each generation:
+        size_t NP = 500;
+        size_t max_generations = 1000;
+        ArgumentContainer const *initial_guess = nullptr;
+        unsigned threads = std::thread::hardware_concurrency();
+    };
+
+    template <typename ArgumentContainer>
+    void validate_differential_evolution_parameters( differential_evolution_parameters<ArgumentContainer> const &de_params )
+    {
+        std::ostringstream oss;
+        validate_bounds( de_params.lower_bounds, de_params.upper_bounds );
+        if ( de_params.NP < 4 )
+        {
+            oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+            oss << ": The population size must be at least 4, but requested population size of " << de_params.NP << ".";
+            throw std::invalid_argument( oss.str() );
+        }
+
+        // From: "Differential Evolution: A Practical Approach to Global Optimization (Natural Computing Series)"
+        // > The scale factor, F in (0,1+), is a positive real number that controls the rate at which the population evolves.
+        // > While there is no upper limit on F, effective values are seldom greater than 1.0.
+        // ...
+        // Also see "Limits on F", Section 2.5.1:
+        // > This discontinuity at F = 1 reduces the number of mutants by half and can result in erratic convergence...
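+        // For the check below, recall how F enters the mutation step: the mutant is
+        // v = x_r1 + F * ( x_r2 - x_r3 ), so F directly scales the differential move.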
+        auto F = de_params.mutation_factor;
+        if ( FloatUtils::isNan( F ) || F >= 1 || F <= 0 )
+        {
+            oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+            oss << ": F in (0, 1) is required, but got F=" << F << ".";
+            throw std::domain_error( oss.str() );
+        }
+
+        if ( de_params.max_generations < 1 )
+        {
+            oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+            oss << ": There must be at least one generation.";
+            throw std::invalid_argument( oss.str() );
+        }
+
+        if ( de_params.initial_guess )
+        {
+            validate_initial_guess( *de_params.initial_guess, de_params.lower_bounds, de_params.upper_bounds );
+        }
+
+        if ( de_params.threads == 0 )
+        {
+            oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+            oss << ": There must be at least one thread.";
+            throw std::invalid_argument( oss.str() );
+        }
+    }
+
+    template <typename ArgumentContainer, class Func, class URBG>
+    ArgumentContainer differential_evolution( const Func cost_function,
+                                              differential_evolution_parameters<ArgumentContainer> const &de_params,
+                                              URBG &gen,
+                                              std::invoke_result_t<Func, ArgumentContainer> target_value
+                                                  = std::numeric_limits<std::invoke_result_t<Func, ArgumentContainer>>::quiet_NaN(),
+                                              std::atomic<bool> *cancellation = nullptr,
+                                              std::vector<std::pair<ArgumentContainer, std::invoke_result_t<Func, ArgumentContainer>>> *queries = nullptr,
+                                              std::atomic<std::invoke_result_t<Func, ArgumentContainer>> *current_minimum_cost = nullptr )
+    {
+        using Real = typename ArgumentContainer::value_type;
+        using DimensionlessReal = decltype( Real() / Real() );
+        using ResultType = std::invoke_result_t<Func, ArgumentContainer>;
+
+        validate_differential_evolution_parameters( de_params );
+        const size_t dimension = de_params.lower_bounds.size();
+        auto NP = de_params.NP;
+        auto population = random_initial_population( de_params.lower_bounds, de_params.upper_bounds, NP, gen );
+
+        if ( de_params.initial_guess )
+        {
+            population[0] = *de_params.initial_guess;
+        }
+
+        std::vector<ResultType> cost( NP, std::numeric_limits<ResultType>::quiet_NaN() );
+        std::atomic<bool> target_attained = false;
+
+        // This mutex is only used if the queries are stored:
+        std::mutex mt;
+
+        std::vector<std::thread> thread_pool;
+        auto const threads = de_params.threads;
+        for ( size_t j = 0; j < threads; ++j )
+        {
+            // Note that if some members of the population take way longer to compute,
+            // then this parallelization strategy is very suboptimal.
+            // However, we tried using std::async (which should be robust to this particular problem),
+            // but the overhead was just totally unacceptable on ARM Macs (the only platform tested).
+            // As the economists say "there are no solutions, only tradeoffs".
+
+            thread_pool.emplace_back( [&, j]()
+            {
+                for ( size_t i = j; i < cost.size(); i += threads )
+                {
+                    cost[i] = cost_function( population[i] );
+                    if ( current_minimum_cost && cost[i] < *current_minimum_cost )
+                    {
+                        *current_minimum_cost = cost[i];
+                    }
+                    if ( queries )
+                    {
+                        std::scoped_lock lock( mt );
+                        queries->push_back( std::make_pair( population[i], cost[i] ) );
+                    }
+
+                    if ( !FloatUtils::isNan( target_value ) && cost[i] <= target_value )
+                    {
+                        target_attained = true;
+                    }
+                }
+            } );
+        }
+
+        for ( auto &thread : thread_pool )
+        {
+            thread.join();
+        }
+
+        std::vector<ArgumentContainer> trial_vectors( NP );
+        for ( size_t i = 0; i < NP; ++i )
+        {
+            if constexpr ( has_resize_v<ArgumentContainer> )
+            {
+                trial_vectors[i].resize( dimension );
+            }
+        }
+
+        std::vector<std::mt19937_64> thread_generators( threads );
+        for ( size_t j = 0; j < threads; ++j )
+        {
+            thread_generators[j].seed( gen() );
+        }
+
+        // std::vector<bool> isn't threadsafe!
+        std::vector<char> updated_indices( NP, 0 );
+
+        for ( size_t generation = 0; generation < de_params.max_generations; ++generation )
+        {
+            if ( cancellation && *cancellation )
+            {
+                break;
+            }
+            if ( target_attained )
+            {
+                break;
+            }
+
+            thread_pool.resize( 0 );
+            for ( size_t j = 0; j < threads; ++j )
+            {
+                thread_pool.emplace_back( [&, j]()
+                {
+                    auto &tlg = thread_generators[j];
+                    std::uniform_real_distribution<DimensionlessReal> unif01( DimensionlessReal( 0 ), DimensionlessReal( 1 ) );
+
+                    for ( size_t i = j; i < cost.size(); i += threads )
+                    {
+                        if ( target_attained )
+                        {
+                            return;
+                        }
+                        if ( cancellation && *cancellation )
+                        {
+                            return;
+                        }
+
+                        size_t r1, r2, r3;
+                        do
+                        {
+                            r1 = tlg() % NP;
+                        } while ( r1 == i );
+
+                        do
+                        {
+                            r2 = tlg() % NP;
+                        } while ( r2 == i || r2 == r1 );
+
+                        do
+                        {
+                            r3 = tlg() % NP;
+                        } while ( r3 == i || r3 == r2 || r3 == r1 );
+
+                        // See equation (4) of the reference: the guaranteed-crossover index is
+                        // drawn once per trial vector, so at least one coordinate always changes.
+                        auto guaranteed_changed_idx = tlg() % dimension;
+
+                        for ( size_t k = 0; k < dimension; ++k )
+                        {
+                            if ( unif01( tlg ) < de_params.crossover_probability || k == guaranteed_changed_idx )
+                            {
+                                auto tmp = population[r1][k] + de_params.mutation_factor * ( population[r2][k] - population[r3][k] );
+                                auto const &lb = de_params.lower_bounds[k];
+                                auto const &ub = de_params.upper_bounds[k];
+                                // Some others recommend regenerating the indices rather than clamping,
+                                // but regeneration is not guaranteed to terminate quickly, so we clamp:
+                                trial_vectors[i][k] = std::clamp( tmp, lb, ub );
+                            }
+                            else
+                            {
+                                trial_vectors[i][k] = population[i][k];
+                            }
+                        }
+
+                        auto const trial_cost = cost_function( trial_vectors[i] );
+                        if ( FloatUtils::isNan( trial_cost ) )
+                        {
+                            continue;
+                        }
+                        if ( queries )
+                        {
+                            std::scoped_lock lock( mt );
+                            queries->push_back( std::make_pair( trial_vectors[i], trial_cost ) );
+                        }
+
+                        if ( trial_cost < cost[i] || FloatUtils::isNan( cost[i] ) )
+                        {
+                            cost[i] = trial_cost;
+                            if ( !FloatUtils::isNan( target_value ) && cost[i] <= target_value )
+                            {
+                                target_attained = true;
+                            }
+
+                            if ( current_minimum_cost && cost[i] < *current_minimum_cost )
+                            {
+                                *current_minimum_cost = cost[i];
+                            }
+
+                            // Can't do this! It's a race condition:
+                            // population[i] = trial_vectors[i];
+                            // Instead mark all the indices that need to be updated:
+                            updated_indices[i] = 1;
+                        }
+                    }
+                } );
+            }
+            for ( auto &thread : thread_pool )
+            {
+                thread.join();
+            }
+
+            for ( size_t i = 0; i < NP; ++i )
+            {
+                if ( updated_indices[i] )
+                {
+                    population[i] = trial_vectors[i];
+                    updated_indices[i] = 0;
+                }
+            }
+        }
+
+        auto it = std::min_element( cost.begin(), cost.end() );
+        return population[std::distance( cost.begin(), it )];
+    }
+
+} // namespace NLR
+#endif

From 8031a41a30eb232f16719b3aff277b568afa429c Mon Sep 17 00:00:00 2001
From: ido-shm-uel
Date: Mon, 14 Oct 2024 12:19:19 +0300
Subject: [PATCH 23/81] 0-6-10: Current state of PreimageApproximation
 (EstimateVolume implemented).

0-6-10: Current state of PreimageApproximation (EstimateVolume implemented).
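EstimateVolume draws GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS uniform samples
from the current neuron bounds; each sample is scored by sigmoid( log( sum_exp ) ),
where sum_exp accumulates exp( margin ) over all non-eliminated neurons and the margin
measures how far the sample violates the parameterised symbolic bounds. The returned
average is a smooth proxy for the volume of the over-approximation, which the optimizer
then minimises over the per-layer coefficients.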
---
 src/nlr/LPFormulator.cpp | 67 ++++++++++++++++++++++++++++++++++------
 src/nlr/LPFormulator.h   |  2 +-
 src/nlr/Layer.cpp        | 41 ++++++++++++++++++++++--
 src/nlr/Layer.h          |  3 +-
 4 files changed, 99 insertions(+), 14 deletions(-)

diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp
index 6876586a57..6b7b39c929 100644
--- a/src/nlr/LPFormulator.cpp
+++ b/src/nlr/LPFormulator.cpp
@@ -339,12 +339,7 @@ void LPFormulator::optimizeBoundsWithLpRelaxation( const Map<unsigned, Layer *>

 void LPFormulator::optimizeBoundsWithInvprop( const Map<unsigned, Layer *> &layers )
 {
-    Map<unsigned, double> coeffs;
-    for ( const auto &pair : layers )
-    {
-        //coeffs.insert( pair.first, GlobalConfiguration::INVPROP_INITIAL_ALPHA );
-        coeffs.insert( pair.first, 0.5 );
-    }
+
 }

 void LPFormulator::optimizeBoundsWithPreimageApproximation( const Map<unsigned, Layer *> &layers )
@@ -353,7 +348,7 @@ void LPFormulator::optimizeBoundsWithPreimageApproximation( const Map<unsigned,
-    auto opt_params = differential_evolution_parameters<Vector<double>>();
+    auto opt_params = differential_evolution_parameters<std::vector<double>>();
     unsigned depth = layers.size();
     opt_params.lower_bounds.resize( depth, 0 );
     opt_params.upper_bounds.resize( depth, 1 );
@@ -361,14 +356,66 @@ void LPFormulator::optimizeBoundsWithPreimageApproximation( const Map<unsigned, Layer *> &layers )
+    for ( unsigned i = 0; i < layers.size(); ++i )
+        layers[i]->computeParameterisedSymbolicBounds( optimal_coeffs[i], true );
+    optimizeBoundsWithLpRelaxation( layers, true, optimal_coeffs );
 }

 double LPFormulator::EstimateVolume( const Map<unsigned, Layer *> &layers, std::vector<double> coeffs )
 {
-    return 0;
+    // First, run parameterised symbolic bound propagation.
+    for ( unsigned i = 0; i < layers.size(); ++i )
+        layers[i]->computeParameterisedSymbolicBounds( coeffs[i] );
+
+    std::mt19937_64 rng( GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED );
+    double average = 0;
+    for ( unsigned i = 0; i < GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; ++i )
+    {
+        // Sample a point from the known bounds.
+        Map<NeuronIndex, double> point;
+        for ( const auto &pair : layers )
+        {
+            unsigned layerIndex = pair.first;
+            const Layer *layer = pair.second;
+            for ( unsigned index = 0; index < layer->getSize(); ++index )
+            {
+                if ( layer->neuronEliminated( index ) )
+                    continue;
+
+                const NeuronIndex neuron( layerIndex, index );
+                double lb = layer->getLb( index );
+                double ub = layer->getUb( index );
+                std::uniform_real_distribution<> dis( lb, ub );
+                point.insert( neuron, dis( rng ) );
+            }
+        }
+
+        // Compute the sigmoid of the log-sum-exp of the point's distances from the bounding hyperplanes.
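+        // For a sampled point with per-neuron violation margins m >= 0, the loop below
+        // accumulates sum_exp = sum( exp( m ) ), and the score is then
+        // sigmoid( log( sum_exp ) ) = sum_exp / ( 1 + sum_exp ), written below as
+        // 1 / ( 1 + 1 / sum_exp ).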
+        double sum_exp = 0;
+        for ( const auto &pair : layers )
+        {
+            const Layer *layer = pair.second;
+            for ( unsigned index = 0; index < layer->getSize(); ++index )
+            {
+                if ( layer->neuronEliminated( index ) )
+                    continue;
+
+                double margin = layer->calculateDifferenceFromSymbolic( point, index );
+                double expMargin = std::exp( margin );
+                if ( FloatUtils::wellFormed( expMargin ) )
+                    sum_exp += expMargin;
+            }
+        }
+
+        double slse = 1 / ( 1 + ( 1 / sum_exp ) );
+        average += slse / GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS;
+    }
+
+    return average;
 }

 void LPFormulator::optimizeBoundsOfOneLayerWithLpRelaxation( const Map<unsigned, Layer *> &layers,
diff --git a/src/nlr/LPFormulator.h b/src/nlr/LPFormulator.h
index 4184839d9c..64fd022fc4 100644
--- a/src/nlr/LPFormulator.h
+++ b/src/nlr/LPFormulator.h
@@ -16,6 +16,7 @@
 #ifndef __LPFormulator_h__
 #define __LPFormulator_h__

+#include "DifferentialEvolution.h"
 #include "GurobiWrapper.h"
 #include "LayerOwner.h"
 #include "Map.h"
@@ -26,7 +27,6 @@
 #include
 #include
 #include
-#include
 #include
 #include

diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp
index 6fdb081cba..644c056f8d 100644
--- a/src/nlr/Layer.cpp
+++ b/src/nlr/Layer.cpp
@@ -305,7 +305,7 @@ void Layer::computeSimulations()
         unsigned sourceSize = sourceLayerEntry.second;
         const double *weights = _layerToWeights[sourceLayerEntry.first];

-        for ( unsigned i = 0; i < _size; i++ )
+        for ( unsigned i = 0; i < _size; ++i )
         {
             for ( unsigned j = 0; j < simulationSize; ++j )
                 _simulations[i][j] = _bias[i];
@@ -3362,7 +3362,23 @@ void Layer::computeSymbolicBoundsForWeightedSum()
     }
 }

-void Layer::computeParameterisedSymbolicBounds( double coeff )
+double Layer::calculateDifferenceFromSymbolic( Map<NeuronIndex, double> &point, unsigned i ) const
+{
+    double lower_sum = _symbolicLowerBias[i];
+    double upper_sum = _symbolicUpperBias[i];
+
+    for ( unsigned j = 0; j < _inputLayerSize; ++j )
+    {
+        const NeuronIndex neuron( 0, j );
+        lower_sum += _symbolicLb[j * _size + i] * point[neuron];
+        upper_sum += _symbolicUb[j * _size + i] * point[neuron];
+    }
+
+    const NeuronIndex currentNeuron( _layerIndex, i );
+    return FloatUtils::max( point[currentNeuron] - upper_sum, 0 ) +
+           FloatUtils::max( lower_sum - point[currentNeuron], 0 );
+}
+
+void Layer::computeParameterisedSymbolicBounds( double coeff, bool receive )
 {
     switch ( _type )
     {
@@ -3382,6 +3398,27 @@ void Layer::computeParameterisedSymbolicBounds( double coeff )
         computeSymbolicBounds();
         break;
     }
+
+    if ( receive )
+    {
+        for ( unsigned i = 0; i < _size; ++i )
+        {
+            Map<NeuronIndex, double> lower_polygonal_tightening;
+            Map<NeuronIndex, double> upper_polygonal_tightening;
+
+            for ( unsigned j = 0; j < _inputLayerSize; ++j )
+            {
+                const NeuronIndex neuron( 0, j );
+                lower_polygonal_tightening.insert( neuron, _symbolicLb[j * _size + i] );
+                upper_polygonal_tightening.insert( neuron, _symbolicUb[j * _size + i] );
+            }
+
+            _layerOwner->receivePolygonalTighterBound(
+                PolygonalTightening( lower_polygonal_tightening, _symbolicLowerBias[i], PolygonalTightening::LB ) );
+            _layerOwner->receivePolygonalTighterBound(
+                PolygonalTightening( upper_polygonal_tightening, _symbolicUpperBias[i], PolygonalTightening::UB ) );
+        }
+    }
 }

 void Layer::computeParameterisedSymbolicBoundsForRelu( double coeff )
diff --git a/src/nlr/Layer.h b/src/nlr/Layer.h
index 965f85ef45..aa318e9181 100644
--- a/src/nlr/Layer.h
+++ b/src/nlr/Layer.h
@@ -137,7 +137,8 @@ class Layer
     void obtainCurrentBounds( const Query &inputQuery );
     void obtainCurrentBounds();
     void computeSymbolicBounds();
-    void computeParameterisedSymbolicBounds( double coeff );
+    void computeParameterisedSymbolicBounds( double coeff,
bool receive = false ); + double calculateDifferenceFromSymbolic( Map &point, unsigned i ) const; void computeIntervalArithmeticBounds(); /* From dd584e7adb57e0512c3f640459b2924814aa809d Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 24 Oct 2024 20:12:00 +0300 Subject: [PATCH 24/81] 0-6-11: Add constants for PreimageApproximation. 0-6-11: Add constants for PreimageApproximation. --- src/configuration/GlobalConfiguration.cpp | 5 ++++- src/configuration/GlobalConfiguration.h | 9 +++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/src/configuration/GlobalConfiguration.cpp b/src/configuration/GlobalConfiguration.cpp index 6cb1f6d8de..1a7d133f40 100644 --- a/src/configuration/GlobalConfiguration.cpp +++ b/src/configuration/GlobalConfiguration.cpp @@ -69,6 +69,9 @@ const double GlobalConfiguration::COST_FUNCTION_ERROR_THRESHOLD = 0.0000000001; const unsigned GlobalConfiguration::SIMULATION_RANDOM_SEED = 1; const unsigned GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED = 1; const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED = 1; +const unsigned GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS = 1000; +const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS = 1000; +const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE = 0.1; const bool GlobalConfiguration::USE_HARRIS_RATIO_TEST = true; @@ -83,7 +86,7 @@ const bool GlobalConfiguration::PL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCES const bool GlobalConfiguration::NL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING = true; const double GlobalConfiguration::PREPROCESSOR_ALMOST_FIXED_THRESHOLD = 0.00001; -const unsigned GlobalConfiguration::PREPROCESSSING_MAX_TIGHTEING_ROUND = 1000; +const unsigned GlobalConfiguration::PREPROCESSSING_MAX_TIGHTEING_ROUND = 10; const bool GlobalConfiguration::WARM_START = false; diff --git a/src/configuration/GlobalConfiguration.h b/src/configuration/GlobalConfiguration.h index e0daaf133f..a779b93937 100644 --- a/src/configuration/GlobalConfiguration.h +++ b/src/configuration/GlobalConfiguration.h @@ -154,8 +154,17 @@ class GlobalConfiguration // Random seed for EstimateVolume procedure (PreimageApproximation). static const unsigned VOLUME_ESTIMATION_RANDOM_SEED; + // Number of iterations for EstimateVolume procedure (PreimageApproximation). + static const unsigned VOLUME_ESTIMATION_ITERATIONS; + // Random seed for PreimageApproximation optimization. static const unsigned PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED; + + // Maximum iterations for PreimageApproximation optimization. + static const unsigned PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS; + + // Step size for PreimageApproximation optimization. + static const double PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; // How often should projected steepest edge reset the reference space? static const unsigned PSE_ITERATIONS_BEFORE_RESET; From cd52524f05f6be9fa3f65d2cef029143db42086c Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 24 Oct 2024 20:14:20 +0300 Subject: [PATCH 25/81] 0-6-12: Current state of PreimageApproximation (implement coordinate descent). 0-6-12: Current state of PreimageApproximation (implement coordinate descent). 
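The new single-threaded coordinate descent evaluates 2 * dimension candidates per
iteration, stepping +/- step_size along each coordinate of the current guess and
moving to the best candidate found. A rough usage sketch (hypothetical cost functor
`cost`; `depth` mirrors the PreimageApproximation call site below):

    NLR::coordinate_descent_parameters<std::vector<double>> params;
    params.lower_bounds.resize( depth, 0 );
    params.upper_bounds.resize( depth, 1 );
    std::mt19937_64 rng( GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED );
    auto best = NLR::coordinate_descent( cost, params, rng, /* target_value */ 0.0 );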
--- src/nlr/CoordinateDescent.h | 276 ++++++++++++++++++++++++++++++++++++ src/nlr/LPFormulator.cpp | 5 +- src/nlr/LPFormulator.h | 2 +- 3 files changed, 279 insertions(+), 4 deletions(-) create mode 100644 src/nlr/CoordinateDescent.h diff --git a/src/nlr/CoordinateDescent.h b/src/nlr/CoordinateDescent.h new file mode 100644 index 0000000000..dd9d1c0fd1 --- /dev/null +++ b/src/nlr/CoordinateDescent.h @@ -0,0 +1,276 @@ +/* + * Copyright Nick Thompson, 2024 + * Use, modification and distribution are subject to the + * Boost Software License, Version 1.0. (See accompanying file + * LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) + */ +#ifndef COORDINATE_DESCENT +#define COORDINATE_DESCENT + +#include "FloatUtils.h" +#include "GlobalConfiguration.h" + +#include // for std::sort +#include +#include +#include +#include +#include +#include +#include // for std::false_type +#include +#include + +namespace NLR { + + template struct has_resize : std::false_type {}; + + template struct has_resize().resize( size_t{} ) )>> : std::true_type {}; + + template constexpr bool has_resize_v = has_resize::value; + + template + void validate_bounds( ArgumentContainer const &lower_bounds, + ArgumentContainer const &upper_bounds ) + { + std::ostringstream oss; + + if ( lower_bounds.size() == 0 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The dimension of the problem cannot be zero."; + throw std::domain_error( oss.str() ); + } + + if ( upper_bounds.size() != lower_bounds.size() ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": There must be the same number of lower bounds as upper bounds, but given "; + oss << upper_bounds.size() << " upper bounds, and " << lower_bounds.size() << " lower bounds."; + throw std::domain_error( oss.str() ); + } + + for ( size_t i = 0; i < lower_bounds.size(); ++i ) + { + auto lb = lower_bounds[i]; + auto ub = upper_bounds[i]; + if ( lb > ub ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The upper bound must be greater than or equal to the lower bound, but the upper bound is " << ub + << " and the lower is " << lb << "."; + throw std::domain_error( oss.str() ); + } + + if ( !FloatUtils::isFinite( lb ) ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The lower bound must be finite, but got " << lb << "."; + oss << " For infinite bounds, emulate with std::numeric_limits::lower() or use a standard infinite->finite " + "transform."; + throw std::domain_error( oss.str() ); + } + + if ( !FloatUtils::isFinite( ub ) ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The upper bound must be finite, but got " << ub << "."; + oss << " For infinite bounds, emulate with std::numeric_limits::max() or use a standard infinite->finite " + "transform."; + throw std::domain_error( oss.str() ); + } + } + } + + template + std::vector random_initial_population( ArgumentContainer const &lower_bounds, + ArgumentContainer const &upper_bounds, + size_t initial_population_size, URBG &&gen ) + { + using Real = typename ArgumentContainer::value_type; + using DimensionlessReal = decltype( Real() / Real() ); + constexpr bool has_resize = has_resize_v; + std::vector population( initial_population_size ); + auto const dimension = lower_bounds.size(); + + for ( size_t i = 0; i < population.size(); ++i ) + { + if constexpr ( has_resize ) + population[i].resize( dimension ); + else + { + // Argument type must be known at compile-time; like std::array: + if ( 
population[i].size() != dimension )
+                {
+                    std::ostringstream oss;
+                    oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+                    oss << ": For containers which do not have resize, the default size must be the same as the dimension, ";
+                    oss << "but the default container size is " << population[i].size() << " and the dimension of the problem is "
+                        << dimension << ".";
+                    oss << " The function argument container type is " << typeid( ArgumentContainer ).name() << ".\n";
+                    throw std::runtime_error( oss.str() );
+                }
+            }
+        }
+
+        // Why don't we provide an option to initialize with (say) a Gaussian distribution?
+        // > If the optimum's location is fairly well known,
+        // > a Gaussian distribution may prove somewhat faster, although it
+        // > may also increase the probability that the population will converge prematurely.
+        // > In general, uniform distributions are preferred, since they best reflect
+        // > the lack of knowledge about the optimum's location.
+        // - Differential Evolution: A Practical Approach to Global Optimization
+        // That said, scipy uses Latin Hypercube sampling and says self-avoiding sequences are preferable.
+        // So this is something that could be investigated and potentially improved.
+        std::uniform_real_distribution<DimensionlessReal> dis( DimensionlessReal( 0 ), DimensionlessReal( 1 ) );
+        for ( size_t i = 0; i < population.size(); ++i )
+        {
+            for ( size_t j = 0; j < dimension; ++j )
+            {
+                auto const &lb = lower_bounds[j];
+                auto const &ub = upper_bounds[j];
+                population[i][j] = lb + dis( gen ) * ( ub - lb );
+            }
+        }
+
+        return population;
+    }
+
+    template <typename ArgumentContainer>
+    void validate_initial_guess( ArgumentContainer const &initial_guess,
+                                 ArgumentContainer const &lower_bounds,
+                                 ArgumentContainer const &upper_bounds )
+    {
+        std::ostringstream oss;
+        auto const dimension = lower_bounds.size();
+
+        if ( initial_guess.size() != dimension )
+        {
+            oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+            oss << ": The initial guess must have the same dimensions as the problem,";
+            oss << " but the problem size is " << dimension << " and the initial guess has " << initial_guess.size()
+                << " elements.";
+            throw std::domain_error( oss.str() );
+        }
+
+        for ( size_t i = 0; i < dimension; ++i )
+        {
+            auto lb = lower_bounds[i];
+            auto ub = upper_bounds[i];
+
+            if ( !FloatUtils::isFinite( initial_guess[i] ) )
+            {
+                oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+                oss << ": At index " << i << ", the initial guess is " << initial_guess[i]
+                    << ", make sure all elements of the initial guess are finite.";
+                throw std::domain_error( oss.str() );
+            }
+
+            if ( initial_guess[i] < lb || initial_guess[i] > ub )
+            {
+                oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+                oss << ": At index " << i << " the initial guess " << initial_guess[i] << " is not in the bounds [" << lb << ", "
+                    << ub << "].";
+                throw std::domain_error( oss.str() );
+            }
+        }
+    }
+
+    // Single-Threaded Coordinate Descent
+
+    // We provide the parameters in a struct; there are too many of them and they are too unwieldy to pass individually:
+    template <typename ArgumentContainer> struct coordinate_descent_parameters
+    {
+        using Real = typename ArgumentContainer::value_type;
+        ArgumentContainer lower_bounds;
+        ArgumentContainer upper_bounds;
+
+        Real step_size = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE;
+        size_t max_iterations = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS;
+        ArgumentContainer const *initial_guess = nullptr;
+    };
+
+    template <typename ArgumentContainer>
+    void validate_coordinate_descent_parameters( coordinate_descent_parameters<ArgumentContainer> const &cd_params )
+    {
+        std::ostringstream oss;
+        validate_bounds( cd_params.lower_bounds, cd_params.upper_bounds );
+
+        if ( FloatUtils::isNan( cd_params.step_size ) || cd_params.step_size <= 0 )
+        {
+            oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+            oss << ": step_size > 0 is required, but got step_size=" << cd_params.step_size << ".";
+            throw std::domain_error( oss.str() );
+        }
+
+        if ( cd_params.max_iterations < 1 )
+        {
+            oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+            oss << ": There must be at least one iteration.";
+            throw std::invalid_argument( oss.str() );
+        }
+
+        if ( cd_params.initial_guess )
+        {
+            validate_initial_guess( *cd_params.initial_guess, cd_params.lower_bounds, cd_params.upper_bounds );
+        }
+    }
+
+    template <typename ArgumentContainer, class Func, class URBG>
+    ArgumentContainer coordinate_descent( const Func cost_function,
+                                          coordinate_descent_parameters<ArgumentContainer> const &cd_params,
+                                          URBG &gen,
+                                          std::invoke_result_t<Func, ArgumentContainer> target_value,
+                                          bool *cancellation = nullptr,
+                                          std::vector<std::pair<ArgumentContainer, std::invoke_result_t<Func, ArgumentContainer>>> *queries = nullptr,
+                                          std::invoke_result_t<Func, ArgumentContainer> *current_minimum_cost = nullptr )
+    {
+        using ResultType = std::invoke_result_t<Func, ArgumentContainer>;
+
+        validate_coordinate_descent_parameters( cd_params );
+        const size_t dimension = cd_params.lower_bounds.size();
+        const size_t candidates_number = 2 * dimension;
+        auto step_size = cd_params.step_size;
+        auto max_iterations = cd_params.max_iterations;
+        auto guess = random_initial_population( cd_params.lower_bounds, cd_params.upper_bounds, 1, gen );
+
+        if ( cd_params.initial_guess )
+        {
+            guess[0] = *cd_params.initial_guess;
+        }
+
+        std::vector<ArgumentContainer> candidates( candidates_number );
+        std::vector<ResultType> costs( candidates_number );
+        ArgumentContainer current_minimum = guess[0];
+        // Track the best cost locally, so progress does not rely on the optional
+        // current_minimum_cost out-parameter being supplied:
+        ResultType best_cost = cost_function( guess[0] );
+
+        for ( size_t i = 0; i < max_iterations; ++i )
+        {
+            for ( size_t j = 0; j < candidates_number; ++j )
+            {
+                candidates[j] = guess[0];
+                size_t index = j / 2;
+                double sign = ( j % 2 == 0 ? 1.0 : -1.0 );
+                candidates[j][index] += sign * step_size;
+                candidates[j][index] = std::clamp(
+                    candidates[j][index], cd_params.lower_bounds[index], cd_params.upper_bounds[index] );
+
+                costs[j] = cost_function( candidates[j] );
+
+                if ( costs[j] < best_cost )
+                {
+                    best_cost = costs[j];
+                    current_minimum = candidates[j];
+                    if ( current_minimum_cost && best_cost < *current_minimum_cost )
+                    {
+                        *current_minimum_cost = best_cost;
+                    }
+                }
+
+                if ( !FloatUtils::isNan( target_value ) && costs[j] <= target_value )
+                {
+                    break;
+                }
+            }
+
+            guess[0] = current_minimum;
+        }
+
+        return guess[0];
+    }
+
+} // namespace NLR
+#endif
diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp
index 6b7b39c929..2189b563ca 100644
--- a/src/nlr/LPFormulator.cpp
+++ b/src/nlr/LPFormulator.cpp
@@ -348,16 +348,15 @@ void LPFormulator::optimizeBoundsWithPreimageApproximation( const Map<unsigned,
-    auto opt_params = differential_evolution_parameters<std::vector<double>>();
+    auto opt_params = coordinate_descent_parameters<std::vector<double>>();
     unsigned depth = layers.size();
     opt_params.lower_bounds.resize( depth, 0 );
     opt_params.upper_bounds.resize( depth, 1 );
-    opt_params.threads = 1;
     double value_to_reach = 0;
     std::mt19937_64 rng( GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED );

     // Find optimal coeffs and run final parameterised symbolic bound tightening + backward LP relaxation.
- auto optimal_coeffs = differential_evolution( EstimateVolumeBind, opt_params, rng, value_to_reach ); + auto optimal_coeffs = coordinate_descent( EstimateVolumeBind, opt_params, rng, value_to_reach ); for ( unsigned i = 0; i < layers.size(); ++i ) layers[i]->computeParameterisedSymbolicBounds( optimal_coeffs[i], true ); diff --git a/src/nlr/LPFormulator.h b/src/nlr/LPFormulator.h index 64fd022fc4..41f5ac1a3f 100644 --- a/src/nlr/LPFormulator.h +++ b/src/nlr/LPFormulator.h @@ -16,7 +16,7 @@ #ifndef __LPFormulator_h__ #define __LPFormulator_h__ -#include "DifferentialEvolution.h" +#include "CoordinateDescent.h" #include "GurobiWrapper.h" #include "LayerOwner.h" #include "Map.h" From b71935947851c97babcbdcddda20458709c91b26 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 24 Oct 2024 20:16:45 +0300 Subject: [PATCH 26/81] 0-6-13: Timing PreimageApproximation with coordinate descent (very slow). 0-6-13: Timing PreimageApproximation with coordinate descent (very slow). --- src/nlr/tests/Test_NetworkLevelReasoner.h | 128 ++++++++++++++++++++++ 1 file changed, 128 insertions(+) diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h index bc4d620609..0c16ae1601 100644 --- a/src/nlr/tests/Test_NetworkLevelReasoner.h +++ b/src/nlr/tests/Test_NetworkLevelReasoner.h @@ -12177,6 +12177,134 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } + void test_preimage_approximation_leaky_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( + { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List polygonal_bounds; + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintPolygonalTightenings( polygonal_bounds ) ); + for ( auto &bound : polygonal_bounds ) + { + bound.dump(); + } + /*TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current 
bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), + Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + + Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), + Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + + Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), + Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { Tightening( 4, -0.5, Tightening::LB ), + Tightening( 5, -0.4, Tightening::LB ), + + Tightening( 8, -0.4571, Tightening::LB ), + Tightening( 9, -1.1057, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );*/ + } + bool boundsEqual( const List &bounds, const List &expectedBounds ) { if ( bounds.size() != expectedBounds.size() ) From f29ccf08e8f367405d1fc590ed50b3a55ada9580 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Wed, 6 Nov 2024 11:19:24 +0200 Subject: [PATCH 27/81] 0-6-14: Small Fix (same as 0-5---4). 0-6-14: Small Fix (same as 0-5---4). --- src/engine/Engine.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/engine/Engine.cpp b/src/engine/Engine.cpp index d41ac92dd7..534ca5f9b9 100644 --- a/src/engine/Engine.cpp +++ b/src/engine/Engine.cpp @@ -1579,7 +1579,9 @@ void Engine::performMILPSolverBoundedTightening( Query *inputQuery ) // TODO: Remove this block after getting ready to support sigmoid with MILP Bound // Tightening. 
-    if ( _milpSolverBoundTighteningType != MILPSolverBoundTighteningType::NONE &&
+    if ( ( _milpSolverBoundTighteningType == MILPSolverBoundTighteningType::MILP_ENCODING ||
+           _milpSolverBoundTighteningType == MILPSolverBoundTighteningType::MILP_ENCODING_INCREMENTAL ||
+           _milpSolverBoundTighteningType == MILPSolverBoundTighteningType::ITERATIVE_PROPAGATION ) &&
          _preprocessedQuery->getNonlinearConstraints().size() > 0 )
         throw MarabouError( MarabouError::FEATURE_NOT_YET_SUPPORTED,
                             "Marabou doesn't support sigmoid with MILP Bound Tightening" );

From 0069af2f3995161d6772316aacb54513ccf02a04 Mon Sep 17 00:00:00 2001
From: ido-shm-uel
Date: Thu, 12 Dec 2024 20:50:22 +0200
Subject: [PATCH 28/81] 0-6--14: Current progress in PreimageApproximation
 (0-5--6 fixes, optimization algorithms, parameters, clang-format)

- Apply 0-5--6 fixes (Backward-Forward Algorithm branch) to this branch.
- Add new optimization algorithms for PreimageApproximation: Coordinate Descent,
  Gradient Descent, Adam Optimizer.
- Change default algorithm (Adam) + parameters to reduce runtime.
- Apply clang-format to all files changed.
---
 src/nlr/AdamOptimizer.h                 |   354 +
 src/nlr/CoordinateDescent.h             |   426 +-
 src/nlr/DifferentialEvolution.h         |   875 +-
 src/nlr/Engine.cpp                      |  3813 +++++
 src/nlr/GlobalConfiguration.cpp         |   238 +
 src/nlr/GlobalConfiguration.h           |   315 +
 src/nlr/GradientDescent.h               |   315 +
 src/nlr/LPFormulator.cpp                |   336 +-
 src/nlr/LPFormulator.h                  |    81 +-
 src/nlr/Layer.cpp                       |   567 +-
 src/nlr/Layer.h                         |    22 +-
 src/nlr/LayerOwner.h                    |     2 +-
 src/nlr/MILPSolverBoundTighteningType.h |    46 +
 src/nlr/NetworkLevelReasoner.cpp        |     3 +-
 src/nlr/NetworkLevelReasoner.h          |     8 +-
 src/nlr/PolygonalTightening.h           |   100 +
 src/nlr/Test_NetworkLevelReasoner.h     | 12348 ++++++++++++++++++++++
 17 files changed, 18755 insertions(+), 1094 deletions(-)
 create mode 100644 src/nlr/AdamOptimizer.h
 create mode 100644 src/nlr/Engine.cpp
 create mode 100644 src/nlr/GlobalConfiguration.cpp
 create mode 100644 src/nlr/GlobalConfiguration.h
 create mode 100644 src/nlr/GradientDescent.h
 create mode 100644 src/nlr/MILPSolverBoundTighteningType.h
 create mode 100644 src/nlr/PolygonalTightening.h
 create mode 100644 src/nlr/Test_NetworkLevelReasoner.h

diff --git a/src/nlr/AdamOptimizer.h b/src/nlr/AdamOptimizer.h
new file mode 100644
index 0000000000..7d97046329
--- /dev/null
+++ b/src/nlr/AdamOptimizer.h
@@ -0,0 +1,354 @@
+/*
+ * Copyright Nick Thompson, 2024
+ * Use, modification and distribution are subject to the
+ * Boost Software License, Version 1.0.
(See accompanying file + * LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) + */ +#ifndef ADAM_OPTIMIZER +#define ADAM_OPTIMIZER + +#include "FloatUtils.h" +#include "GlobalConfiguration.h" + +#include // for std::sort +#include +#include +#include +#include +#include +#include +#include // for std::false_type +#include +#include + +namespace NLR { + +template struct has_resize : std::false_type +{ +}; + +template +struct has_resize().resize( size_t{} ) )>> : std::true_type +{ +}; + +template constexpr bool has_resize_v = has_resize::value; + +template +void validate_bounds( ArgumentContainer const &lower_bounds, ArgumentContainer const &upper_bounds ) +{ + std::ostringstream oss; + + if ( lower_bounds.size() == 0 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The dimension of the problem cannot be zero."; + throw std::domain_error( oss.str() ); + } + + if ( upper_bounds.size() != lower_bounds.size() ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": There must be the same number of lower bounds as upper bounds, but given "; + oss << upper_bounds.size() << " upper bounds, and " << lower_bounds.size() + << " lower bounds."; + throw std::domain_error( oss.str() ); + } + + for ( size_t i = 0; i < lower_bounds.size(); ++i ) + { + auto lb = lower_bounds[i]; + auto ub = upper_bounds[i]; + if ( lb > ub ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The upper bound must be greater than or equal to the lower bound, but the " + "upper bound is " + << ub << " and the lower is " << lb << "."; + throw std::domain_error( oss.str() ); + } + + if ( !FloatUtils::isFinite( lb ) ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The lower bound must be finite, but got " << lb << "."; + oss << " For infinite bounds, emulate with std::numeric_limits::lower() or use a " + "standard infinite->finite " + "transform."; + throw std::domain_error( oss.str() ); + } + + if ( !FloatUtils::isFinite( ub ) ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The upper bound must be finite, but got " << ub << "."; + oss << " For infinite bounds, emulate with std::numeric_limits::max() or use a " + "standard infinite->finite " + "transform."; + throw std::domain_error( oss.str() ); + } + } +} + +template +std::vector random_initial_population( ArgumentContainer const &lower_bounds, + ArgumentContainer const &upper_bounds, + size_t initial_population_size, + URBG &&gen ) +{ + using Real = typename ArgumentContainer::value_type; + using DimensionlessReal = decltype( Real() / Real() ); + constexpr bool has_resize = has_resize_v; + std::vector population( initial_population_size ); + auto const dimension = lower_bounds.size(); + + for ( size_t i = 0; i < population.size(); ++i ) + { + if constexpr ( has_resize ) + population[i].resize( dimension ); + else + { + // Argument type must be known at compile-time; like std::array: + if ( population[i].size() != dimension ) + { + std::ostringstream oss; + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": For containers which do not have resize, the default size must be the " + "same as the dimension, "; + oss << "but the default container size is " << population[i].size() + << " and the dimension of the problem is " << dimension << "."; + oss << " The function argument container type is " + << typeid( ArgumentContainer ).name() << ".\n"; + throw std::runtime_error( oss.str() ); + } + } + } + + // Why don't we provide an option 
to initialize with (say) a Gaussian distribution?
+// > If the optimum's location is fairly well known,
+// > a Gaussian distribution may prove somewhat faster, although it
+// > may also increase the probability that the population will converge prematurely.
+// > In general, uniform distributions are preferred, since they best reflect
+// > the lack of knowledge about the optimum's location.
+// - Differential Evolution: A Practical Approach to Global Optimization
+// That said, scipy uses Latin Hypercube sampling and says self-avoiding sequences are
+// preferable. So this is something that could be investigated and potentially improved.
+    std::uniform_real_distribution<DimensionlessReal> dis( DimensionlessReal( 0 ),
+                                                           DimensionlessReal( 1 ) );
+    for ( size_t i = 0; i < population.size(); ++i )
+    {
+        for ( size_t j = 0; j < dimension; ++j )
+        {
+            auto const &lb = lower_bounds[j];
+            auto const &ub = upper_bounds[j];
+            population[i][j] = lb + dis( gen ) * ( ub - lb );
+        }
+    }
+
+    return population;
+}
+
+template <typename ArgumentContainer>
+void validate_initial_guess( ArgumentContainer const &initial_guess,
+                             ArgumentContainer const &lower_bounds,
+                             ArgumentContainer const &upper_bounds )
+{
+    std::ostringstream oss;
+    auto const dimension = lower_bounds.size();
+
+    if ( initial_guess.size() != dimension )
+    {
+        oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+        oss << ": The initial guess must have the same dimensions as the problem,";
+        oss << " but the problem size is " << dimension << " and the initial guess has "
+            << initial_guess.size() << " elements.";
+        throw std::domain_error( oss.str() );
+    }
+
+    for ( size_t i = 0; i < dimension; ++i )
+    {
+        auto lb = lower_bounds[i];
+        auto ub = upper_bounds[i];
+
+        if ( !FloatUtils::isFinite( initial_guess[i] ) )
+        {
+            oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+            oss << ": At index " << i << ", the initial guess is " << initial_guess[i]
+                << ", make sure all elements of the initial guess are finite.";
+            throw std::domain_error( oss.str() );
+        }
+
+        if ( initial_guess[i] < lb || initial_guess[i] > ub )
+        {
+            oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+            oss << ": At index " << i << " the initial guess " << initial_guess[i]
+                << " is not in the bounds [" << lb << ", " << ub << "].";
+            throw std::domain_error( oss.str() );
+        }
+    }
+}
+
+// Single-Threaded Adam Optimizer
+
+// We provide the parameters in a struct; there are too many of them and they are too unwieldy to
+// pass individually:
+template <typename ArgumentContainer> struct adam_optimizer_parameters
+{
+    using Real = typename ArgumentContainer::value_type;
+    ArgumentContainer lower_bounds;
+    ArgumentContainer upper_bounds;
+
+    Real step_size = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE;
+    Real lr = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE;
+    Real epsilon = GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS;
+    Real beta1 = 0.9;
+    Real beta2 = 0.999;
+    Real weight_decay = 0.2;
+    bool maximize = false;
+    size_t max_iterations = 100;
+    ArgumentContainer const *initial_guess = nullptr;
+};
+
+template <typename ArgumentContainer>
+void validate_adam_optimizer_parameters(
+    adam_optimizer_parameters<ArgumentContainer> const &cd_params )
+{
+    std::ostringstream oss;
+    validate_bounds( cd_params.lower_bounds, cd_params.upper_bounds );
+
+    if ( FloatUtils::isNan( cd_params.step_size ) || cd_params.step_size <= 0 )
+    {
+        oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+        oss << ": step_size > 0 is required, but got step_size=" << cd_params.step_size << ".";
+        throw std::domain_error( oss.str() );
+    }
+
+    if ( FloatUtils::isNan( cd_params.epsilon ) || cd_params.epsilon <= 0 )
+    {
+        oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+        oss << ": epsilon > 0 is required, but got epsilon=" << cd_params.epsilon << ".";
+        throw std::domain_error( oss.str() );
+    }
+
+    if ( FloatUtils::isNan( cd_params.beta1 ) || cd_params.beta1 <= 0 || cd_params.beta1 >= 1 )
+    {
+        oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+        oss << ": beta1 in (0, 1) is required, but got beta1=" << cd_params.beta1 << ".";
+        throw std::domain_error( oss.str() );
+    }
+
+    if ( FloatUtils::isNan( cd_params.beta2 ) || cd_params.beta2 <= 0 || cd_params.beta2 >= 1 )
+    {
+        oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+        oss << ": beta2 in (0, 1) is required, but got beta2=" << cd_params.beta2 << ".";
+        throw std::domain_error( oss.str() );
+    }
+
+    if ( FloatUtils::isNan( cd_params.lr ) || cd_params.lr <= 0 )
+    {
+        oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+        oss << ": lr > 0 is required, but got lr=" << cd_params.lr << ".";
+        throw std::domain_error( oss.str() );
+    }
+
+    if ( FloatUtils::isNan( cd_params.weight_decay ) || cd_params.weight_decay < 0 )
+    {
+        oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+        oss << ": weight_decay >= 0 is required, but got weight_decay=" << cd_params.weight_decay
+            << ".";
+        throw std::domain_error( oss.str() );
+    }
+
+    if ( cd_params.max_iterations < 1 )
+    {
+        oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+        oss << ": There must be at least one iteration.";
+        throw std::invalid_argument( oss.str() );
+    }
+
+    if ( cd_params.initial_guess )
+    {
+        validate_initial_guess(
+            *cd_params.initial_guess, cd_params.lower_bounds, cd_params.upper_bounds );
+    }
+}
+
+template <typename ArgumentContainer, class Func, class URBG>
+ArgumentContainer adam_optimizer(
+    const Func cost_function,
+    adam_optimizer_parameters<ArgumentContainer> const &cd_params,
+    URBG &gen,
+    std::invoke_result_t<Func, ArgumentContainer> target_value,
+    bool *cancellation = nullptr,
+    std::vector<std::pair<ArgumentContainer, std::invoke_result_t<Func, ArgumentContainer>>>
+        *queries = nullptr,
+    std::invoke_result_t<Func, ArgumentContainer> *current_minimum_cost = nullptr )
+{
+    using Real = typename ArgumentContainer::value_type;
+    using ResultType = std::invoke_result_t<Func, ArgumentContainer>;
+
+    validate_adam_optimizer_parameters( cd_params );
+    const size_t dimension = cd_params.lower_bounds.size();
+    auto step_size = cd_params.step_size;
+    auto max_iterations = cd_params.max_iterations;
+    auto maximize = cd_params.maximize;
+    auto epsilon = cd_params.epsilon;
+    auto beta1 = cd_params.beta1;
+    auto beta2 = cd_params.beta2;
+    auto lr = cd_params.lr;
+    auto weight_decay = cd_params.weight_decay;
+    auto guess =
+        random_initial_population( cd_params.lower_bounds, cd_params.upper_bounds, 1, gen );
+
+    if ( cd_params.initial_guess )
+    {
+        guess[0] = *cd_params.initial_guess;
+    }
+
+    std::vector<ArgumentContainer> candidates( dimension );
+    std::vector<Real> gradient( dimension );
+    std::vector<Real> first_moment( dimension );
+    std::vector<Real> second_moment( dimension );
+
+    for ( size_t j = 0; j < dimension; ++j )
+    {
+        first_moment[j] = 0;
+        second_moment[j] = 0;
+    }
+
+    for ( size_t i = 0; i < max_iterations; ++i )
+    {
+        // Forward-difference gradient of the cost around the current iterate:
+        auto const base_cost = cost_function( guess[0] );
+        if ( !FloatUtils::isNan( target_value ) && base_cost <= target_value )
+        {
+            return guess[0];
+        }
+
+        double sign = ( maximize ? -1.0 : 1.0 );
+        for ( size_t j = 0; j < dimension; ++j )
+        {
+            candidates[j] = guess[0];
+            candidates[j][j] += step_size;
+
+            auto const cost = cost_function( candidates[j] );
+            gradient[j] = sign * ( cost - base_cost ) / step_size + weight_decay * guess[0][j];
+
+            if ( !FloatUtils::isNan( target_value ) && cost <= target_value )
+            {
+                return candidates[j];
+            }
+        }
+
+        for ( size_t j = 0; j < dimension; ++j )
+        {
+            first_moment[j] = beta1 * first_moment[j] + ( 1 - beta1 ) * gradient[j];
+            second_moment[j] = beta2 * second_moment[j] + ( 1 - beta2 ) * gradient[j] * gradient[j];
+
+            // Bias-corrected moment estimates; the correction uses the iteration count,
+            // and the raw moments are kept for the next iteration:
+            auto const corrected_first = first_moment[j] / ( 1 - std::pow( beta1, i + 1.0 ) );
+            auto const corrected_second = second_moment[j] / ( 1 - std::pow( beta2, i + 1.0 ) );
+
+            guess[0][j] -= lr * corrected_first / ( std::sqrt( corrected_second ) + epsilon );
+            guess[0][j] =
+                std::clamp( guess[0][j], cd_params.lower_bounds[j], cd_params.upper_bounds[j] );
+        }
+    }
+
+    return guess[0];
+}
+
+} // namespace NLR
+#endif
diff --git a/src/nlr/CoordinateDescent.h b/src/nlr/CoordinateDescent.h
index dd9d1c0fd1..83f01ed197 100644
--- a/src/nlr/CoordinateDescent.h
+++ b/src/nlr/CoordinateDescent.h
@@ -17,260 +17,280 @@
 #include
 #include
 #include
-#include // for std::false_type
+#include // for std::false_type
 #include
 #include

 namespace NLR {

-    template <typename T, typename = void> struct has_resize : std::false_type {};
+template <typename T, typename = void> struct has_resize : std::false_type
+{
+};

-    template <typename T>
-    struct has_resize<T, std::void_t<decltype( std::declval<T>().resize( size_t{} ) )>> : std::true_type {};
+template <typename T>
+struct has_resize<T, std::void_t<decltype( std::declval<T>().resize( size_t{} ) )>> : std::true_type
+{
+};

-    template <typename T> constexpr bool has_resize_v = has_resize<T>::value;
+template <typename T> constexpr bool has_resize_v = has_resize<T>::value;

-    template <typename ArgumentContainer>
-    void validate_bounds( ArgumentContainer const &lower_bounds,
-                          ArgumentContainer const &upper_bounds )
+template <typename ArgumentContainer>
+void validate_bounds( ArgumentContainer const &lower_bounds, ArgumentContainer const &upper_bounds )
 {
     std::ostringstream oss;

     if ( lower_bounds.size() == 0 )
     {
         oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
         oss << ": The dimension of the problem cannot be zero.";
         throw std::domain_error( oss.str() );
     }

     if ( upper_bounds.size() != lower_bounds.size() )
     {
         oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
         oss << ": There must be the same number of lower bounds as upper bounds, but given ";
         oss << upper_bounds.size() << " upper bounds, and " << lower_bounds.size()
             << " lower bounds.";
         throw std::domain_error( oss.str() );
     }

     for ( size_t i = 0; i < lower_bounds.size(); ++i )
     {
         auto lb = lower_bounds[i];
         auto ub = upper_bounds[i];
         if ( lb > ub )
         {
             oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
-            oss
<< ": The upper bound must be greater than or equal to the lower bound, but the upper bound is " << ub - << " and the lower is " << lb << "."; - throw std::domain_error( oss.str() ); - } - - if ( !FloatUtils::isFinite( lb ) ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The lower bound must be finite, but got " << lb << "."; - oss << " For infinite bounds, emulate with std::numeric_limits::lower() or use a standard infinite->finite " - "transform."; - throw std::domain_error( oss.str() ); - } - - if ( !FloatUtils::isFinite( ub ) ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The upper bound must be finite, but got " << ub << "."; - oss << " For infinite bounds, emulate with std::numeric_limits::max() or use a standard infinite->finite " - "transform."; - throw std::domain_error( oss.str() ); - } + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The upper bound must be finite, but got " << ub << "."; + oss << " For infinite bounds, emulate with std::numeric_limits::max() or use a " + "standard infinite->finite " + "transform."; + throw std::domain_error( oss.str() ); } } +} - template - std::vector random_initial_population( ArgumentContainer const &lower_bounds, - ArgumentContainer const &upper_bounds, - size_t initial_population_size, URBG &&gen ) - { - using Real = typename ArgumentContainer::value_type; - using DimensionlessReal = decltype( Real() / Real() ); - constexpr bool has_resize = has_resize_v; - std::vector population( initial_population_size ); - auto const dimension = lower_bounds.size(); - - for ( size_t i = 0; i < population.size(); ++i ) - { - if constexpr ( has_resize ) - population[i].resize( dimension ); - else - { - // Argument type must be known at compile-time; like std::array: - if ( population[i].size() != dimension ) - { - std::ostringstream oss; - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": For containers which do not have resize, the default size must be the same as the dimension, "; - oss << "but the default container size is " << population[i].size() << " and the dimension of the problem is " - << dimension << "."; - oss << " The function argument container type is " << typeid( ArgumentContainer ).name() << ".\n"; - throw std::runtime_error( oss.str() ); - } - } - } +template +std::vector random_initial_population( ArgumentContainer const &lower_bounds, + ArgumentContainer const &upper_bounds, + size_t initial_population_size, + URBG &&gen ) +{ + using Real = typename ArgumentContainer::value_type; + using DimensionlessReal = decltype( Real() / Real() ); + constexpr bool has_resize = has_resize_v; + std::vector population( initial_population_size ); + auto const dimension = lower_bounds.size(); - // Why don't we provide an option to initialize with (say) a Gaussian distribution? - // > If the optimum's location is fairly well known, - // > a Gaussian distribution may prove somewhat faster, although it - // > may also increase the probability that the population will converge prematurely. - // > In general, uniform distributions are preferred, since they best reflect - // > the lack of knowledge about the optimum's location. - // - Differential Evolution: A Practical Approach to Global Optimization - // That said, scipy uses Latin Hypercube sampling and says self-avoiding sequences are preferable. - // So this is something that could be investigated and potentially improved. 
- std::uniform_real_distribution dis( DimensionlessReal( 0 ), DimensionlessReal( 1 ) ); - for ( size_t i = 0; i < population.size(); ++i ) + for ( size_t i = 0; i < population.size(); ++i ) + { + if constexpr ( has_resize ) + population[i].resize( dimension ); + else { - for ( size_t j = 0; j < dimension; ++j ) + // Argument type must be known at compile-time; like std::array: + if ( population[i].size() != dimension ) { - auto const &lb = lower_bounds[j]; - auto const &ub = upper_bounds[j]; - population[i][j] = lb + dis( gen ) * ( ub - lb ); + std::ostringstream oss; + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": For containers which do not have resize, the default size must be the " + "same as the dimension, "; + oss << "but the default container size is " << population[i].size() + << " and the dimension of the problem is " << dimension << "."; + oss << " The function argument container type is " + << typeid( ArgumentContainer ).name() << ".\n"; + throw std::runtime_error( oss.str() ); } } - - return population; } - template - void validate_initial_guess( ArgumentContainer const &initial_guess, - ArgumentContainer const &lower_bounds, - ArgumentContainer const &upper_bounds) + // Why don't we provide an option to initialize with (say) a Gaussian distribution? + // > If the optimum's location is fairly well known, + // > a Gaussian distribution may prove somewhat faster, although it + // > may also increase the probability that the population will converge prematurely. + // > In general, uniform distributions are preferred, since they best reflect + // > the lack of knowledge about the optimum's location. + // - Differential Evolution: A Practical Approach to Global Optimization + // That said, scipy uses Latin Hypercube sampling and says self-avoiding sequences are + // preferable. So this is something that could be investigated and potentially improved. 
+    std::uniform_real_distribution<DimensionlessReal> dis( DimensionlessReal( 0 ),
+                                                           DimensionlessReal( 1 ) );
     for ( size_t i = 0; i < population.size(); ++i )
     {
         for ( size_t j = 0; j < dimension; ++j )
         {
             auto const &lb = lower_bounds[j];
             auto const &ub = upper_bounds[j];
             population[i][j] = lb + dis( gen ) * ( ub - lb );
         }
     }

     return population;
 }

-    template <typename ArgumentContainer>
-    void validate_initial_guess( ArgumentContainer const &initial_guess,
-                                 ArgumentContainer const &lower_bounds,
-                                 ArgumentContainer const &upper_bounds )
+template <typename ArgumentContainer>
+void validate_initial_guess( ArgumentContainer const &initial_guess,
+                             ArgumentContainer const &lower_bounds,
+                             ArgumentContainer const &upper_bounds )
 {
     std::ostringstream oss;
     auto const dimension = lower_bounds.size();

     if ( initial_guess.size() != dimension )
     {
         oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
         oss << ": The initial guess must have the same dimensions as the problem,";
-        oss << " but the problem size is " << dimension << " and the initial guess has " << initial_guess.size()
-            << " elements.";
+        oss << " but the problem size is " << dimension << " and the initial guess has "
+            << initial_guess.size() << " elements.";
         throw std::domain_error( oss.str() );
     }

     for ( size_t i = 0; i < dimension; ++i )
     {
         auto lb = lower_bounds[i];
         auto ub = upper_bounds[i];

         if ( !FloatUtils::isFinite( initial_guess[i] ) )
         {
             oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
             oss << ": At index " << i << ", the initial guess is " << initial_guess[i]
                 << ", make sure all elements of the initial guess are finite.";
             throw std::domain_error( oss.str() );
         }

         if ( initial_guess[i] < lb || initial_guess[i] > ub )
         {
             oss << __FILE__ << ":" << __LINE__ << ":" <<
-         oss << ": There must be at least one generation.";
-         throw std::invalid_argument( oss.str() );
-     }
-
-     if ( cd_params.initial_guess )
-     {
-         validate_initial_guess( *cd_params.initial_guess, cd_params.lower_bounds, cd_params.upper_bounds );
+             oss << ": At index " << i << " the initial guess " << initial_guess[i]
+                 << " is not in the bounds [" << lb << ", " << ub << "].";
+             throw std::domain_error( oss.str() );
      }
  }
+ }
+
+ // Single-Threaded Coordinate Descent: a fixed-step pattern search. Each iteration
+ // probes +/- step_size along every coordinate of the current point and moves to
+ // the best candidate seen so far.
+
+ // We provide the parameters in a struct; there are too many of them and they are too unwieldy to
+ // pass individually:
+ template <typename ArgumentContainer> struct coordinate_descent_parameters
+ {
+     using Real = typename ArgumentContainer::value_type;
+     ArgumentContainer lower_bounds;
+     ArgumentContainer upper_bounds;
+
+     Real step_size = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE;
+     // size_t max_iterations =
+     // GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS;
+     size_t max_iterations = 2;
+     ArgumentContainer const *initial_guess = nullptr;
+ };
+
+ template <typename ArgumentContainer>
+ void validate_coordinate_descent_parameters(
+     coordinate_descent_parameters<ArgumentContainer> const &cd_params )
+ {
+     std::ostringstream oss;
+     validate_bounds( cd_params.lower_bounds, cd_params.upper_bounds );
+
+     if ( FloatUtils::isNan( cd_params.step_size ) || cd_params.step_size <= 0 )
+     {
+         oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+         oss << ": step_size > 0 is required, but got step_size=" << cd_params.step_size << ".";
+         throw std::domain_error( oss.str() );
+     }
- template <typename ArgumentContainer, class Func, class URBG>
- ArgumentContainer coordinate_descent( const Func cost_function,
-                                       coordinate_descent_parameters<ArgumentContainer> const &cd_params,
-                                       URBG &gen,
-                                       std::invoke_result_t<Func, ArgumentContainer> target_value,
-                                       bool *cancellation = nullptr,
-                                       std::vector<std::pair<ArgumentContainer, std::invoke_result_t<Func, ArgumentContainer>>> *queries = nullptr,
-                                       std::invoke_result_t<Func, ArgumentContainer> *current_minimum_cost = nullptr)
+     if ( cd_params.max_iterations < 1 )
  {
-     using ResultType = std::invoke_result_t<Func, ArgumentContainer>;
-
-     validate_coordinate_descent_parameters( cd_params );
-     const size_t dimension = cd_params.lower_bounds.size();
-     const size_t candidates_number = 2 * dimension;
-     auto step_size = cd_params.step_size;
-     auto max_iterations = cd_params.max_iterations;
-     auto guess = random_initial_population( cd_params.lower_bounds, cd_params.upper_bounds, 1, gen );
-
-     if ( cd_params.initial_guess )
-     {
-         guess[0] = *cd_params.initial_guess;
-     }
-
-     std::vector<ArgumentContainer> candidates( candidates_number );
-     std::vector<ResultType> costs( candidates_number );
-     ArgumentContainer current_minimum = guess[0];
-
-     for ( size_t i = 0; i < max_iterations; ++i )
+         oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+         oss << ": There must be at least one iteration.";
+         throw std::invalid_argument( oss.str() );
+     }
+
+     if ( cd_params.initial_guess )
+     {
+         validate_initial_guess(
+             *cd_params.initial_guess, cd_params.lower_bounds, cd_params.upper_bounds );
+     }
+ }
+
+ template <typename ArgumentContainer, class Func, class URBG>
+ ArgumentContainer coordinate_descent(
+     const Func cost_function,
+     coordinate_descent_parameters<ArgumentContainer> const &cd_params,
+     URBG &gen,
+     std::invoke_result_t<Func, ArgumentContainer> target_value,
+     bool *cancellation = nullptr,
+     std::vector<std::pair<ArgumentContainer, std::invoke_result_t<Func, ArgumentContainer>>>
+         *queries = nullptr,
+     std::invoke_result_t<Func, ArgumentContainer> *current_minimum_cost = nullptr )
+ {
+     using ResultType = std::invoke_result_t<Func, ArgumentContainer>;
+
+     validate_coordinate_descent_parameters( cd_params );
+     const size_t dimension = cd_params.lower_bounds.size();
+     const size_t candidates_number = 2 * dimension;
+     auto step_size = cd_params.step_size;
+     auto max_iterations = cd_params.max_iterations;
+     auto guess =
+         random_initial_population( cd_params.lower_bounds, cd_params.upper_bounds, 1,
gen ); + + if ( cd_params.initial_guess ) + { + guess[0] = *cd_params.initial_guess; + } + + std::vector candidates( candidates_number ); + std::vector costs( candidates_number ); + ArgumentContainer current_minimum = guess[0]; + + for ( size_t i = 0; i < max_iterations; ++i ) + { + for ( size_t j = 0; j < candidates_number; ++j ) { - for ( size_t j = 0; j < candidates_number; ++j) + candidates[j] = guess[0]; + size_t index = j / 2; + size_t sign = ( j % 2 == 0 ? 1 : -1 ); + candidates[j][index] += sign * step_size; + + costs[j] = cost_function( candidates[j] ); + + if ( current_minimum_cost && costs[j] < *current_minimum_cost ) { - candidates[j] = guess[0]; - size_t index = j / 2; - size_t sign = ( j % 2 == 0 ? 1 : -1 ); - candidates[j][index] += sign * step_size; - - costs[j] = cost_function( candidates[j] ); - - if ( current_minimum_cost && costs[j] < *current_minimum_cost ) - { - *current_minimum_cost = costs[j]; - current_minimum = candidates[j]; - } - - if ( !FloatUtils::isNan( target_value ) && costs[j] <= target_value ) - { - break; - } + *current_minimum_cost = costs[j]; + current_minimum = candidates[j]; + } + + if ( !FloatUtils::isNan( target_value ) && costs[j] <= target_value ) + { + break; } - - guess[0] = current_minimum; } - - return guess[0]; + + guess[0] = current_minimum; } + return guess[0]; +} + } // namespace NLR #endif diff --git a/src/nlr/DifferentialEvolution.h b/src/nlr/DifferentialEvolution.h index 030f338a84..eb27c0ef0e 100644 --- a/src/nlr/DifferentialEvolution.h +++ b/src/nlr/DifferentialEvolution.h @@ -18,511 +18,532 @@ #include #include #include -#include // for std::false_type +#include // for std::false_type #include #include namespace NLR { - template struct has_resize : std::false_type {}; +template struct has_resize : std::false_type +{ +}; - template struct has_resize().resize( size_t{} ) )>> : std::true_type {}; +template +struct has_resize().resize( size_t{} ) )>> : std::true_type +{ +}; - template constexpr bool has_resize_v = has_resize::value; +template constexpr bool has_resize_v = has_resize::value; - template - void validate_bounds( ArgumentContainer const &lower_bounds, - ArgumentContainer const &upper_bounds ) +template +void validate_bounds( ArgumentContainer const &lower_bounds, ArgumentContainer const &upper_bounds ) +{ + std::ostringstream oss; + + if ( lower_bounds.size() == 0 ) { - std::ostringstream oss; - - if ( lower_bounds.size() == 0 ) + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The dimension of the problem cannot be zero."; + throw std::domain_error( oss.str() ); + } + + if ( upper_bounds.size() != lower_bounds.size() ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": There must be the same number of lower bounds as upper bounds, but given "; + oss << upper_bounds.size() << " upper bounds, and " << lower_bounds.size() + << " lower bounds."; + throw std::domain_error( oss.str() ); + } + + for ( size_t i = 0; i < lower_bounds.size(); ++i ) + { + auto lb = lower_bounds[i]; + auto ub = upper_bounds[i]; + if ( lb > ub ) { oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The dimension of the problem cannot be zero."; + oss << ": The upper bound must be greater than or equal to the lower bound, but the " + "upper bound is " + << ub << " and the lower is " << lb << "."; throw std::domain_error( oss.str() ); } - - if ( upper_bounds.size() != lower_bounds.size() ) + + if ( !FloatUtils::isFinite( lb ) ) { oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": 
There must be the same number of lower bounds as upper bounds, but given ";
-         oss << upper_bounds.size() << " upper bounds, and " << lower_bounds.size() << " lower bounds.";
+         oss << ": The lower bound must be finite, but got " << lb << ".";
+         oss << " For infinite bounds, emulate with std::numeric_limits<Real>::lowest() or use a "
+                "standard infinite->finite "
+                "transform.";
          throw std::domain_error( oss.str() );
      }
-
-     for ( size_t i = 0; i < lower_bounds.size(); ++i )
+
+         if ( !FloatUtils::isFinite( ub ) )
      {
-         auto lb = lower_bounds[i];
-         auto ub = upper_bounds[i];
-         if ( lb > ub )
-         {
-             oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
-             oss << ": The upper bound must be greater than or equal to the lower bound, but the upper bound is " << ub
-                 << " and the lower is " << lb << ".";
-             throw std::domain_error( oss.str() );
-         }
-
-         if ( !FloatUtils::isFinite( lb ) )
-         {
-             oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
-             oss << ": The lower bound must be finite, but got " << lb << ".";
-             oss << " For infinite bounds, emulate with std::numeric_limits<Real>::lower() or use a standard infinite->finite "
-                    "transform.";
-             throw std::domain_error( oss.str() );
-         }
-
-         if ( !FloatUtils::isFinite( ub ) )
-         {
-             oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
-             oss << ": The upper bound must be finite, but got " << ub << ".";
-             oss << " For infinite bounds, emulate with std::numeric_limits<Real>::max() or use a standard infinite->finite "
-                    "transform.";
-             throw std::domain_error( oss.str() );
-         }
+             oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+             oss << ": The upper bound must be finite, but got " << ub << ".";
+             oss << " For infinite bounds, emulate with std::numeric_limits<Real>::max() or use a "
+                    "standard infinite->finite "
+                    "transform.";
+             throw std::domain_error( oss.str() );
      }
  }
+ }
+
+ template <typename ArgumentContainer, class URBG>
+ std::vector<ArgumentContainer> random_initial_population( ArgumentContainer const &lower_bounds,
+                                                           ArgumentContainer const &upper_bounds,
+                                                           size_t initial_population_size,
+                                                           URBG &&gen )
+ {
+     using Real = typename ArgumentContainer::value_type;
+     using DimensionlessReal = decltype( Real() / Real() );
+     constexpr bool has_resize = has_resize_v<ArgumentContainer>;
+     std::vector<ArgumentContainer> population( initial_population_size );
+     auto const dimension = lower_bounds.size();
- template <typename ArgumentContainer, class URBG>
- std::vector<ArgumentContainer> random_initial_population( ArgumentContainer const &lower_bounds,
-                                                           ArgumentContainer const &upper_bounds,
-                                                           size_t initial_population_size, URBG &&gen )
+     for ( size_t i = 0; i < population.size(); ++i )
  {
-     using Real = typename ArgumentContainer::value_type;
-     using DimensionlessReal = decltype( Real() / Real() );
-     constexpr bool has_resize = has_resize_v<ArgumentContainer>;
-     std::vector<ArgumentContainer> population( initial_population_size );
-     auto const dimension = lower_bounds.size();
-
-     for ( size_t i = 0; i < population.size(); ++i )
+         if constexpr ( has_resize )
+             population[i].resize( dimension );
+         else
      {
-         if constexpr ( has_resize )
-             population[i].resize( dimension );
-         else
+             // Argument type must be known at compile-time; like std::array:
+             if ( population[i].size() != dimension )
          {
-             // Argument type must be known at compile-time; like std::array:
-             if ( population[i].size() != dimension )
-             {
-                 std::ostringstream oss;
-                 oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
-                 oss << ": For containers which do not have resize, the default size must be the same as the dimension, ";
-                 oss << "but the default container size is " << population[i].size() << " and the dimension of the problem is "
-                     << dimension << ".";
-                 oss << " The function argument container type is " <<
typeid( ArgumentContainer ).name() << ".\n"; - throw std::runtime_error( oss.str() ); - } + std::ostringstream oss; + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": For containers which do not have resize, the default size must be the " + "same as the dimension, "; + oss << "but the default container size is " << population[i].size() + << " and the dimension of the problem is " << dimension << "."; + oss << " The function argument container type is " + << typeid( ArgumentContainer ).name() << ".\n"; + throw std::runtime_error( oss.str() ); } } + } - // Why don't we provide an option to initialize with (say) a Gaussian distribution? - // > If the optimum's location is fairly well known, - // > a Gaussian distribution may prove somewhat faster, although it - // > may also increase the probability that the population will converge prematurely. - // > In general, uniform distributions are preferred, since they best reflect - // > the lack of knowledge about the optimum's location. - // - Differential Evolution: A Practical Approach to Global Optimization - // That said, scipy uses Latin Hypercube sampling and says self-avoiding sequences are preferable. - // So this is something that could be investigated and potentially improved. - std::uniform_real_distribution dis( DimensionlessReal( 0 ), DimensionlessReal( 1 ) ); - for ( size_t i = 0; i < population.size(); ++i ) + // Why don't we provide an option to initialize with (say) a Gaussian distribution? + // > If the optimum's location is fairly well known, + // > a Gaussian distribution may prove somewhat faster, although it + // > may also increase the probability that the population will converge prematurely. + // > In general, uniform distributions are preferred, since they best reflect + // > the lack of knowledge about the optimum's location. + // - Differential Evolution: A Practical Approach to Global Optimization + // That said, scipy uses Latin Hypercube sampling and says self-avoiding sequences are + // preferable. So this is something that could be investigated and potentially improved. 
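+     // Each coordinate below is drawn uniformly from [lb, ub] via the affine map
+     // lb + u * ( ub - lb ) with u ~ U[0, 1]; e.g. lb = -1, ub = 3, u = 0.25
+     // yields -1 + 0.25 * 4 = 0.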
+     std::uniform_real_distribution<DimensionlessReal> dis( DimensionlessReal( 0 ),
+                                                            DimensionlessReal( 1 ) );
+     for ( size_t i = 0; i < population.size(); ++i )
+     {
+         for ( size_t j = 0; j < dimension; ++j )
      {
-         for ( size_t j = 0; j < dimension; ++j )
-         {
-             auto const &lb = lower_bounds[j];
-             auto const &ub = upper_bounds[j];
-             population[i][j] = lb + dis( gen ) * ( ub - lb );
-         }
+             auto const &lb = lower_bounds[j];
+             auto const &ub = upper_bounds[j];
+             population[i][j] = lb + dis( gen ) * ( ub - lb );
      }
-
-     return population;
  }
+     return population;
+ }
+
+ template <typename ArgumentContainer>
+ void validate_initial_guess( ArgumentContainer const &initial_guess,
+                              ArgumentContainer const &lower_bounds,
+                              ArgumentContainer const &upper_bounds )
+ {
+     std::ostringstream oss;
+     auto const dimension = lower_bounds.size();
+
+     if ( initial_guess.size() != dimension )
+     {
+         oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+         oss << ": The initial guess must have the same dimensions as the problem";
+         oss << ", but the problem size is " << dimension << " and the initial guess has "
+             << initial_guess.size() << " elements.";
+         throw std::domain_error( oss.str() );
+     }
+
+     for ( size_t i = 0; i < dimension; ++i )
  {
-     std::ostringstream oss;
-     auto const dimension = lower_bounds.size();
-
-     if ( initial_guess.size() != dimension )
+         auto lb = lower_bounds[i];
+         auto ub = upper_bounds[i];
+
+         if ( !FloatUtils::isFinite( initial_guess[i] ) )
      {
          oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
-         oss << ": The initial guess must have the same dimensions as the problem,";
-         oss << ", but the problem size is " << dimension << " and the initial guess has " << initial_guess.size()
-             << " elements.";
+             oss << ": At index " << i << ", the initial guess is " << initial_guess[i]
+                 << "; make sure all elements of the initial guess are finite.";
          throw std::domain_error( oss.str() );
      }
-
-     for ( size_t i = 0; i < dimension; ++i )
+
+         if ( initial_guess[i] < lb || initial_guess[i] > ub )
      {
-         auto lb = lower_bounds[i];
-         auto ub = upper_bounds[i];
-
-         if ( !FloatUtils::isFinite( initial_guess[i] ) )
-         {
-             oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
-             oss << ": At index " << i << ", the initial guess is " << initial_guess[i]
-                 << ", make sure all elements of the initial guess are finite.";
-             throw std::domain_error( oss.str() );
-         }
-
-         if ( initial_guess[i] < lb || initial_guess[i] > ub )
-         {
-             oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
-             oss << ": At index " << i << " the initial guess " << initial_guess[i] << " is not in the bounds [" << lb << ", "
-                 << ub << "].";
-             throw std::domain_error( oss.str() );
-         }
+             oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+             oss << ": At index " << i << " the initial guess " << initial_guess[i]
+                 << " is not in the bounds [" << lb << ", " << ub << "].";
+             throw std::domain_error( oss.str() );
      }
  }
+ }
+
+ // Return indices corresponding to the minimum function values.
+ template <typename Real>
+ std::vector<size_t> best_indices( std::vector<Real> const &function_values )
+ {
+     const size_t n = function_values.size();
+     std::vector<size_t> indices( n );
- // Return indices corresponding to the minimum function values.
- template <typename Real>
- std::vector<size_t> best_indices( std::vector<Real> const &function_values )
+     for ( size_t i = 0; i < n; ++i )
  {
-     const size_t n = function_values.size();
-     std::vector<size_t> indices( n );
-
-     for ( size_t i = 0; i < n; ++i )
+         indices[i] = i;
+     }
+
+     std::sort( indices.begin(), indices.end(), [&]( size_t a, size_t b ) {
+         if ( FloatUtils::isNan( function_values[a] ) )
      {
-         indices[i] = i;
+             return false;
      }
+         if ( FloatUtils::isNan( function_values[b] ) )
+         {
+             return true;
+         }
+         return function_values[a] < function_values[b];
+     } );
+
+     return indices;
+ }
-     std::sort( indices.begin(),
-                indices.end(),
-                [&]( size_t a, size_t b )
-                {
-                    if ( FloatUtils::isNan( function_values[a] ) )
-                    {
-                        return false;
-                    }
-                    if ( FloatUtils::isNan( function_values[b] ) )
-                    {
-                        return true;
-                    }
-                    return function_values[a] < function_values[b];
-                });
-
-     return indices;
+ // Weighted Lehmer mean with p = 2: sum_i( w_i * x_i^2 ) / sum_i( w_i * x_i ).
+ template <typename RandomAccessContainer>
+ auto weighted_lehmer_mean( RandomAccessContainer const &values,
+                            RandomAccessContainer const &weights )
+ {
+     if ( values.size() != weights.size() )
+     {
+         std::ostringstream oss;
+         oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+         oss << ": There must be the same number of weights as values, but got " << values.size()
+             << " values and " << weights.size() << " weights.";
+         throw std::logic_error( oss.str() );
  }
- template <typename RandomAccessContainer>
- auto weighted_lehmer_mean( RandomAccessContainer const & values,
-                            RandomAccessContainer const & weights)
+     if ( values.size() == 0 )
  {
-     if ( values.size() != weights.size() )
-     {
-         std::ostringstream oss;
-         oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
-         oss << ": There must be the same number of weights as values, but got " << values.size()
-             << " values and " << weights.size() << " weights.";
-         throw std::logic_error( oss.str() );
-     }
-
-     if ( values.size() == 0 )
+         std::ostringstream oss;
+         oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+         oss << ": There must be at least one value provided.";
+         throw std::logic_error( oss.str() );
+     }
+
+     using Real = typename RandomAccessContainer::value_type;
+     Real numerator = 0;
+     Real denominator = 0;
+
+     for ( size_t i = 0; i < values.size(); ++i )
+     {
+         if ( weights[i] < 0 || !FloatUtils::isFinite( weights[i] ) )
      {
          std::ostringstream oss;
          oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
-         oss << ": There must at least one value provided.";
-         throw std::logic_error( oss.str() );
+             oss << ": All weights must be non-negative and finite, but received weight "
+                 << weights[i] << " at index " << i << " of " << weights.size() << ".";
+             throw std::domain_error( oss.str() );
      }
-
-     using Real = typename RandomAccessContainer::value_type;
-     Real numerator = 0;
-     Real denominator = 0;
-
-     for ( size_t i = 0; i < values.size(); ++i )
-     {
-         if ( weights[i] < 0 || !FloatUtils::isFinite( weights[i] ) )
+
+         Real tmp = weights[i] * values[i];
+         numerator += tmp * values[i];
+         denominator += tmp;
+     }
+     return numerator / denominator;
+ }
+
+ // Storn, R., Price, K. (1997). Differential evolution - a simple and efficient heuristic for global
+ // optimization over continuous spaces. Journal of global optimization, 11, 341-359. See:
+ // https://www.cp.eng.chula.ac.th/~prabhas//teaching/ec/ec2012/storn_price_de.pdf
+
+ // We provide the parameters in a struct; there are too many of them and they are too unwieldy to
+ // pass individually:
+ template <typename ArgumentContainer> struct differential_evolution_parameters
+ {
+     using Real = typename ArgumentContainer::value_type;
+     using DimensionlessReal = decltype( Real() / Real() );
+     ArgumentContainer lower_bounds;
+     ArgumentContainer upper_bounds;
+
+     // mutation factor is also called scale factor or just F in the literature:
+     DimensionlessReal mutation_factor = static_cast<DimensionlessReal>( 0.65 );
+     DimensionlessReal crossover_probability = static_cast<DimensionlessReal>( 0.5 );
+
+     // Population in each generation:
+     size_t NP = 500;
+     size_t max_generations = 1000;
+     ArgumentContainer const *initial_guess = nullptr;
+     unsigned threads = std::thread::hardware_concurrency();
+ };
+
+ template <typename ArgumentContainer>
+ void validate_differential_evolution_parameters(
+     differential_evolution_parameters<ArgumentContainer> const &de_params )
+ {
+     std::ostringstream oss;
+     validate_bounds( de_params.lower_bounds, de_params.upper_bounds );
+     if ( de_params.NP < 4 )
+     {
+         oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+         oss << ": The population size must be at least 4, but requested population size of "
+             << de_params.NP << ".";
+         throw std::invalid_argument( oss.str() );
+     }
+
+     // From: "Differential Evolution: A Practical Approach to Global Optimization
+     // (Natural Computing Series)"
+     // > The scale factor, F in (0,1+), is a positive real number that controls the rate at
+     // > which the population evolves.
+     // > While there is no upper limit on F, effective values are seldom greater than 1.0.
+     // ...
+     // Also see "Limits on F", Section 2.5.1:
+     // > This discontinuity at F = 1 reduces the number of mutants by half and can result in
+     // > erratic convergence...
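+     // Illustrative usage sketch (editorial example; `params`, `gen` and `sphere`
+     // below are hypothetical names local to the example). Minimizes the 2-d
+     // sphere function with the defaults defined above; assumes <random>:
+     //
+     //     differential_evolution_parameters<std::vector<double>> params;
+     //     params.lower_bounds = { -5.0, -5.0 };
+     //     params.upper_bounds = { 5.0, 5.0 };
+     //     std::mt19937 gen( 12345 );
+     //     auto sphere = []( std::vector<double> const &x ) { return x[0] * x[0] + x[1] * x[1]; };
+     //     auto argmin = differential_evolution( sphere, params, gen );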
+ auto F = de_params.mutation_factor; + if ( FloatUtils::isNan( F ) || F >= 1 || F <= 0 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": F in (0, 1) is required, but got F=" << F << "."; + throw std::domain_error( oss.str() ); + } + + if ( de_params.max_generations < 1 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": There must be at least one generation."; + throw std::invalid_argument( oss.str() ); + } + + if ( de_params.initial_guess ) + { + validate_initial_guess( + *de_params.initial_guess, de_params.lower_bounds, de_params.upper_bounds ); + } + + if ( de_params.threads == 0 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": There must be at least one thread."; + throw std::invalid_argument( oss.str() ); + } +} + +template +ArgumentContainer differential_evolution( + const Func cost_function, + differential_evolution_parameters const &de_params, + URBG &gen, + std::invoke_result_t target_value = + std::numeric_limits>::quiet_NaN(), + std::atomic *cancellation = nullptr, + std::vector>> + *queries = nullptr, + std::atomic> *current_minimum_cost = nullptr ) +{ + using Real = typename ArgumentContainer::value_type; + using DimensionlessReal = decltype( Real() / Real() ); + using ResultType = std::invoke_result_t; + + validate_differential_evolution_parameters( de_params ); + const size_t dimension = de_params.lower_bounds.size(); + auto NP = de_params.NP; + auto population = + random_initial_population( de_params.lower_bounds, de_params.upper_bounds, NP, gen ); + + if ( de_params.initial_guess ) + { + population[0] = *de_params.initial_guess; + } + + std::vector cost( NP, std::numeric_limits::quiet_NaN() ); + std::atomic target_attained = false; + + // This mutex is only used if the queries are stored: + std::mutex mt; + + std::vector thread_pool; + auto const threads = de_params.threads; + for ( size_t j = 0; j < threads; ++j ) + { + // Note that if some members of the population take way longer to compute, + // then this parallelization strategy is very suboptimal. + // However, we tried using std::async (which should be robust to this particular problem), + // but the overhead was just totally unacceptable on ARM Macs (the only platform tested). + // As the economists say "there are no solutions, only tradeoffs". + + thread_pool.emplace_back( [&, j]() { + for ( size_t i = j; i < cost.size(); i += threads ) { - std::ostringstream oss; - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": All weights must be positive and finite, but got received weight " << weights[i] - << " at index " << i << " of " << weights.size() << "."; - throw std::domain_error( oss.str() ); + cost[i] = cost_function( population[i] ); + if ( current_minimum_cost && cost[i] < *current_minimum_cost ) + { + *current_minimum_cost = cost[i]; + } + if ( queries ) + { + std::scoped_lock lock( mt ); + queries->push_back( std::make_pair( population[i], cost[i] ) ); + } + + if ( !FloatUtils::isNan( target_value ) && cost[i] <= target_value ) + { + target_attained = true; + } } - - Real tmp = weights[i] * values[i]; - numerator += tmp * values[i]; - denominator += tmp; - } - return numerator / denominator; + } ); } - // Storn, R., Price, K. (1997). Differential evolution-a simple and efficient heuristic for global optimization over - // continuous spaces. - // Journal of global optimization, 11, 341-359. 
- // See: - // https://www.cp.eng.chula.ac.th/~prabhas//teaching/ec/ec2012/storn_price_de.pdf - - // We provide the parameters in a struct-there are too many of them and they are too unwieldy to pass individually: - template struct differential_evolution_parameters + for ( auto &thread : thread_pool ) { - using Real = typename ArgumentContainer::value_type; - using DimensionlessReal = decltype( Real() / Real() ); - ArgumentContainer lower_bounds; - ArgumentContainer upper_bounds; - - // mutation factor is also called scale factor or just F in the literature: - DimensionlessReal mutation_factor = static_cast( 0.65 ); - DimensionlessReal crossover_probability = static_cast( 0.5 ); - - // Population in each generation: - size_t NP = 500; - size_t max_generations = 1000; - ArgumentContainer const *initial_guess = nullptr; - unsigned threads = std::thread::hardware_concurrency(); - }; - - template - void validate_differential_evolution_parameters( differential_evolution_parameters const &de_params ) + thread.join(); + } + + std::vector trial_vectors( NP ); + for ( size_t i = 0; i < NP; ++i ) { - std::ostringstream oss; - validate_bounds( de_params.lower_bounds, de_params.upper_bounds ); - if ( de_params.NP < 4 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The population size must be at least 4, but requested population size of " << de_params.NP << "."; - throw std::invalid_argument( oss.str() ); - } - - // From: "Differential Evolution: A Practical Approach to Global Optimization (Natural Computing Series)" - // > The scale factor, F in (0,1+), is a positive real number that controls the rate at which the population evolves. - // > While there is no upper limit on F, effective values are seldom greater than 1.0. - // ... - // Also see "Limits on F", Section 2.5.1: - // > This discontinuity at F = 1 reduces the number of mutants by half and can result in erratic convergence... - auto F = de_params.mutation_factor; - if ( FloatUtils::isNan( F ) || F >= 1 || F <= 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": F in (0, 1) is required, but got F=" << F << "."; - throw std::domain_error( oss.str() ); - } - - if ( de_params.max_generations < 1 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": There must be at least one generation."; - throw std::invalid_argument( oss.str() ); - } - - if ( de_params.initial_guess ) - { - validate_initial_guess( *de_params.initial_guess, de_params.lower_bounds, de_params.upper_bounds ); - } - - if ( de_params.threads == 0 ) + if constexpr ( has_resize_v ) { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": There must be at least one thread."; - throw std::invalid_argument( oss.str() ); + trial_vectors[i].resize( dimension ); } } - template - ArgumentContainer differential_evolution( const Func cost_function, - differential_evolution_parameters const &de_params, - URBG &gen, - std::invoke_result_t target_value - = std::numeric_limits>::quiet_NaN(), - std::atomic *cancellation = nullptr, - std::vector>> *queries = nullptr, - std::atomic> *current_minimum_cost = nullptr) + std::vector thread_generators( threads ); + for ( size_t j = 0; j < threads; ++j ) + { + thread_generators[j].seed( gen() ); + } + + // std::vector isn't threadsafe! 
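+     // ( std::vector<bool> packs its elements into bits, so concurrent writes to
+     //   distinct indices can touch the same underlying word, which is a data race;
+     //   distinct elements of a std::vector of int can be written concurrently. )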
+ std::vector updated_indices( NP, 0 ); + + for ( size_t generation = 0; generation < de_params.max_generations; ++generation ) { - using Real = typename ArgumentContainer::value_type; - using DimensionlessReal = decltype( Real() / Real() ); - using ResultType = std::invoke_result_t; - - validate_differential_evolution_parameters( de_params ); - const size_t dimension = de_params.lower_bounds.size(); - auto NP = de_params.NP; - auto population = random_initial_population( de_params.lower_bounds, de_params.upper_bounds, NP, gen ); - - if ( de_params.initial_guess ) + if ( cancellation && *cancellation ) { - population[0] = *de_params.initial_guess; + break; } - - std::vector cost( NP, std::numeric_limits::quiet_NaN() ); - std::atomic target_attained = false; - - // This mutex is only used if the queries are stored: - std::mutex mt; - - std::vector thread_pool; - auto const threads = de_params.threads; + if ( target_attained ) + { + break; + } + + thread_pool.resize( 0 ); for ( size_t j = 0; j < threads; ++j ) { - // Note that if some members of the population take way longer to compute, - // then this parallelization strategy is very suboptimal. - // However, we tried using std::async (which should be robust to this particular problem), - // but the overhead was just totally unacceptable on ARM Macs (the only platform tested). - // As the economists say "there are no solutions, only tradeoffs". - - thread_pool.emplace_back( [&, j]() - { - for ( size_t i = j; i < cost.size(); i += threads ) - { - cost[i] = cost_function( population[i] ); - if ( current_minimum_cost && cost[i] < *current_minimum_cost ) - { - *current_minimum_cost = cost[i]; - } - if ( queries ) - { - std::scoped_lock lock( mt ); - queries->push_back( std::make_pair( population[i], cost[i] ) ); - } - - if ( !FloatUtils::isNan( target_value ) && cost[i] <= target_value ) - { - target_attained = true; - } - } - }); + thread_pool.emplace_back( [&, j]() { + auto &tlg = thread_generators[j]; + std::uniform_real_distribution unif01( DimensionlessReal( 0 ), + DimensionlessReal( 1 ) ); + + for ( size_t i = j; i < cost.size(); i += threads ) + { + if ( target_attained ) + { + return; + } + if ( cancellation && *cancellation ) + { + return; + } + + size_t r1, r2, r3; + do + { + r1 = tlg() % NP; + } + while ( r1 == i ); + + do + { + r2 = tlg() % NP; + } + while ( r2 == i || r2 == r1 ); + + do + { + r3 = tlg() % NP; + } + while ( r3 == i || r3 == r2 || r3 == r1 ); + + + for ( size_t k = 0; k < dimension; ++k ) + { + // See equation (4) of the reference: + auto guaranteed_changed_idx = tlg() % dimension; + if ( unif01( tlg ) < de_params.crossover_probability || + k == guaranteed_changed_idx ) + { + auto tmp = + population[r1][k] + de_params.mutation_factor * + ( population[r2][k] - population[r3][k] ); + auto const &lb = de_params.lower_bounds[k]; + auto const &ub = de_params.upper_bounds[k]; + // Some others recommend regenerating the indices rather than clamping; + // I dunno seems like it could get stuck regenerating . . . 
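+                         // Another common alternative is reflection, which avoids
+                         // piling trials onto the bounds themselves, e.g.
+                         // (editorial sketch):
+                         //     if ( tmp < lb ) tmp = lb + ( lb - tmp );
+                         //     else if ( tmp > ub ) tmp = ub - ( tmp - ub );
+                         // possibly followed by a final clamp to guard against
+                         // large overshoots.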
+ trial_vectors[i][k] = std::clamp( tmp, lb, ub ); + } + else + { + trial_vectors[i][k] = population[i][k]; + } + } + + auto const trial_cost = cost_function( trial_vectors[i] ); + if ( FloatUtils::isNan( trial_cost ) ) + { + continue; + } + if ( queries ) + { + std::scoped_lock lock( mt ); + queries->push_back( std::make_pair( trial_vectors[i], trial_cost ) ); + } + + if ( trial_cost < cost[i] || FloatUtils::isNan( cost[i] ) ) + { + cost[i] = trial_cost; + if ( !FloatUtils::isNan( target_value ) && cost[i] <= target_value ) + { + target_attained = true; + } + + if ( current_minimum_cost && cost[i] < *current_minimum_cost ) + { + *current_minimum_cost = cost[i]; + } + + // Can't do this! It's a race condition! + // population[i] = trial_vectors[i]; + // Instead mark all the indices that need to be updated: + updated_indices[i] = 1; + } + } + } ); } - for ( auto &thread : thread_pool ) { thread.join(); } - std::vector trial_vectors( NP ); for ( size_t i = 0; i < NP; ++i ) { - if constexpr ( has_resize_v ) + if ( updated_indices[i] ) { - trial_vectors[i].resize( dimension ); + population[i] = trial_vectors[i]; + updated_indices[i] = 0; } } - - std::vector thread_generators( threads ); - for ( size_t j = 0; j < threads; ++j ) - { - thread_generators[j].seed( gen() ); - } - - // std::vector isn't threadsafe! - std::vector updated_indices( NP, 0 ); - - for ( size_t generation = 0; generation < de_params.max_generations; ++generation ) - { - if ( cancellation && *cancellation ) - { - break; - } - if ( target_attained ) - { - break; - } - - thread_pool.resize( 0 ); - for ( size_t j = 0; j < threads; ++j ) - { - thread_pool.emplace_back( [&, j]() - { - auto& tlg = thread_generators[j]; - std::uniform_real_distribution unif01( DimensionlessReal( 0 ), DimensionlessReal( 1 ) ); - - for ( size_t i = j; i < cost.size(); i += threads ) - { - if ( target_attained ) - { - return; - } - if ( cancellation && *cancellation ) - { - return; - } - - size_t r1, r2, r3; - do - { - r1 = tlg() % NP; - } while ( r1 == i ); - - do - { - r2 = tlg() % NP; - } while ( r2 == i || r2 == r1 ); - - do - { - r3 = tlg() % NP; - } while ( r3 == i || r3 == r2 || r3 == r1 ); - - - for ( size_t k = 0; k < dimension; ++k ) - { - // See equation (4) of the reference: - auto guaranteed_changed_idx = tlg() % dimension; - if ( unif01( tlg ) < de_params.crossover_probability || k == guaranteed_changed_idx ) - { - auto tmp = population[r1][k] + de_params.mutation_factor * ( population[r2][k] - population[r3][k] ); - auto const &lb = de_params.lower_bounds[k]; - auto const &ub = de_params.upper_bounds[k]; - // Some others recommend regenerating the indices rather than clamping; - // I dunno seems like it could get stuck regenerating . . . - trial_vectors[i][k] = std::clamp( tmp, lb, ub ); - } - else - { - trial_vectors[i][k] = population[i][k]; - } - } - - auto const trial_cost = cost_function( trial_vectors[i] ); - if ( FloatUtils::isNan( trial_cost ) ) - { - continue; - } - if ( queries ) - { - std::scoped_lock lock( mt ); - queries->push_back( std::make_pair( trial_vectors[i], trial_cost ) ); - } - - if ( trial_cost < cost[i] || FloatUtils::isNan( cost[i] ) ) - { - cost[i] = trial_cost; - if ( !FloatUtils::isNan( target_value ) && cost[i] <= target_value ) - { - target_attained = true; - } - - if ( current_minimum_cost && cost[i] < *current_minimum_cost ) - { - *current_minimum_cost = cost[i]; - } - - // Can't do this! It's a race condition! 
- //population[i] = trial_vectors[i]; - // Instead mark all the indices that need to be updated: - updated_indices[i] = 1; - } - } - }); - } - for ( auto &thread : thread_pool ) - { - thread.join(); - } - - for ( size_t i = 0; i < NP; ++i ) - { - if ( updated_indices[i] ) - { - population[i] = trial_vectors[i]; - updated_indices[i] = 0; - } - } - } - - auto it = std::min_element( cost.begin(), cost.end() ); - return population[std::distance( cost.begin(), it )]; } + auto it = std::min_element( cost.begin(), cost.end() ); + return population[std::distance( cost.begin(), it )]; +} + } // namespace NLR #endif diff --git a/src/nlr/Engine.cpp b/src/nlr/Engine.cpp new file mode 100644 index 0000000000..e137c3cbb9 --- /dev/null +++ b/src/nlr/Engine.cpp @@ -0,0 +1,3813 @@ +/********************* */ +/*! \file Engine.cpp + ** \verbatim + ** Top contributors (to current version): + ** Guy Katz, Duligur Ibeling, Andrew Wu, Omri Isac + ** This file is part of the Marabou project. + ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS + ** in the top-level source directory) and their institutional affiliations. + ** All rights reserved. See the file COPYING in the top-level source + ** directory for licensing information.\endverbatim + ** + ** \brief [[ Add one-line brief description here ]] + ** + ** [[ Add lengthier description here ]] + **/ + +#include "Engine.h" + +#include "AutoConstraintMatrixAnalyzer.h" +#include "Debug.h" +#include "DisjunctionConstraint.h" +#include "EngineState.h" +#include "InfeasibleQueryException.h" +#include "MStringf.h" +#include "MalformedBasisException.h" +#include "MarabouError.h" +#include "NLRError.h" +#include "PiecewiseLinearConstraint.h" +#include "Preprocessor.h" +#include "Query.h" +#include "TableauRow.h" +#include "TimeUtils.h" +#include "VariableOutOfBoundDuringOptimizationException.h" +#include "Vector.h" + +#include + +Engine::Engine() + : _context() + , _boundManager( _context ) + , _tableau( _boundManager ) + , _preprocessedQuery( nullptr ) + , _rowBoundTightener( *_tableau ) + , _smtCore( this ) + , _numPlConstraintsDisabledByValidSplits( 0 ) + , _preprocessingEnabled( false ) + , _initialStateStored( false ) + , _work( NULL ) + , _basisRestorationRequired( Engine::RESTORATION_NOT_NEEDED ) + , _basisRestorationPerformed( Engine::NO_RESTORATION_PERFORMED ) + , _costFunctionManager( _tableau ) + , _quitRequested( false ) + , _exitCode( Engine::NOT_DONE ) + , _numVisitedStatesAtPreviousRestoration( 0 ) + , _networkLevelReasoner( NULL ) + , _verbosity( Options::get()->getInt( Options::VERBOSITY ) ) + , _lastNumVisitedStates( 0 ) + , _lastIterationWithProgress( 0 ) + , _symbolicBoundTighteningType( Options::get()->getSymbolicBoundTighteningType() ) + , _solveWithMILP( Options::get()->getBool( Options::SOLVE_WITH_MILP ) ) + , _lpSolverType( Options::get()->getLPSolverType() ) + , _gurobi( nullptr ) + , _milpEncoder( nullptr ) + , _soiManager( nullptr ) + , _simulationSize( Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ) ) + , _isGurobyEnabled( Options::get()->gurobiEnabled() ) + , _performLpTighteningAfterSplit( + Options::get()->getBool( Options::PERFORM_LP_TIGHTENING_AFTER_SPLIT ) ) + , _milpSolverBoundTighteningType( Options::get()->getMILPSolverBoundTighteningType() ) + , _sncMode( false ) + , _queryId( "" ) + , _produceUNSATProofs( Options::get()->getBool( Options::PRODUCE_PROOFS ) ) + , _groundBoundManager( _context ) + , _UNSATCertificate( NULL ) +{ + _smtCore.setStatistics( &_statistics ); + _tableau->setStatistics( 
&_statistics ); + _rowBoundTightener->setStatistics( &_statistics ); + _preprocessor.setStatistics( &_statistics ); + + _activeEntryStrategy = _projectedSteepestEdgeRule; + _activeEntryStrategy->setStatistics( &_statistics ); + _statistics.stampStartingTime(); + setRandomSeed( Options::get()->getInt( Options::SEED ) ); + + _boundManager.registerEngine( this ); + _groundBoundManager.registerEngine( this ); + _statisticsPrintingFrequency = ( _lpSolverType == LPSolverType::NATIVE ) + ? GlobalConfiguration::STATISTICS_PRINTING_FREQUENCY + : GlobalConfiguration::STATISTICS_PRINTING_FREQUENCY_GUROBI; + + _UNSATCertificateCurrentPointer = + _produceUNSATProofs ? new ( true ) + CVC4::context::CDO( &_context, NULL ) + : NULL; +} + +Engine::~Engine() +{ + if ( _work ) + { + delete[] _work; + _work = NULL; + } + + if ( _UNSATCertificate ) + { + delete _UNSATCertificate; + _UNSATCertificate = NULL; + } + + if ( _produceUNSATProofs && _UNSATCertificateCurrentPointer ) + _UNSATCertificateCurrentPointer->deleteSelf(); +} + +void Engine::setVerbosity( unsigned verbosity ) +{ + _verbosity = verbosity; +} + +void Engine::adjustWorkMemorySize() +{ + if ( _work ) + { + delete[] _work; + _work = NULL; + } + + _work = new double[_tableau->getM()]; + if ( !_work ) + throw MarabouError( MarabouError::ALLOCATION_FAILED, "Engine::work" ); +} + +void Engine::applySnCSplit( PiecewiseLinearCaseSplit sncSplit, String queryId ) +{ + _sncMode = true; + _sncSplit = sncSplit; + _queryId = queryId; + preContextPushHook(); + _smtCore.pushContext(); + applySplit( sncSplit ); + _boundManager.propagateTightenings(); +} + +bool Engine::inSnCMode() const +{ + return _sncMode; +} + +void Engine::setRandomSeed( unsigned seed ) +{ + srand( seed ); +} + +Query Engine::prepareSnCQuery() +{ + List bounds = _sncSplit.getBoundTightenings(); + List equations = _sncSplit.getEquations(); + + Query sncIPQ = *_preprocessedQuery; + for ( auto &equation : equations ) + sncIPQ.addEquation( equation ); + + for ( auto &bound : bounds ) + { + switch ( bound._type ) + { + case Tightening::LB: + sncIPQ.setLowerBound( bound._variable, bound._value ); + break; + + case Tightening::UB: + sncIPQ.setUpperBound( bound._variable, bound._value ); + } + } + + return sncIPQ; +} + +void Engine::exportQueryWithError( String errorMessage ) +{ + String ipqFileName = ( _queryId.length() > 0 ) ? _queryId + ".ipq" : "failedInputQuery.ipq"; + prepareSnCQuery().saveQuery( ipqFileName ); + printf( "Engine: %s!\nInput query has been saved as %s. Please attach the input query when you " + "open the issue on GitHub.\n", + errorMessage.ascii(), + ipqFileName.ascii() ); +} + +bool Engine::solve( double timeoutInSeconds ) +{ + SignalHandler::getInstance()->initialize(); + SignalHandler::getInstance()->registerClient( this ); + + // Register the boundManager with all the PL constraints + for ( auto &plConstraint : _plConstraints ) + plConstraint->registerBoundManager( &_boundManager ); + for ( auto &nlConstraint : _nlConstraints ) + nlConstraint->registerBoundManager( &_boundManager ); + + // Before encoding, make sure all valid constraints are applied. + applyAllValidConstraintCaseSplits(); + + if ( _solveWithMILP ) + return solveWithMILPEncoding( timeoutInSeconds ); + + updateDirections(); + if ( _lpSolverType == LPSolverType::NATIVE ) + storeInitialEngineState(); + else if ( _lpSolverType == LPSolverType::GUROBI ) + { + ENGINE_LOG( "Encoding convex relaxation into Gurobi..." 
); + _gurobi = std::unique_ptr( new GurobiWrapper() ); + _tableau->setGurobi( &( *_gurobi ) ); + _milpEncoder = std::unique_ptr( new MILPEncoder( *_tableau ) ); + _milpEncoder->setStatistics( &_statistics ); + _milpEncoder->encodeQuery( *_gurobi, *_preprocessedQuery, true ); + ENGINE_LOG( "Encoding convex relaxation into Gurobi - done" ); + } + + mainLoopStatistics(); + if ( _verbosity > 0 ) + { + printf( "\nEngine::solve: Initial statistics\n" ); + _statistics.print(); + printf( "\n---\n" ); + } + + bool splitJustPerformed = true; + struct timespec mainLoopStart = TimeUtils::sampleMicro(); + while ( true ) + { + struct timespec mainLoopEnd = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TIME_MAIN_LOOP_MICRO, + TimeUtils::timePassed( mainLoopStart, mainLoopEnd ) ); + mainLoopStart = mainLoopEnd; + + if ( shouldExitDueToTimeout( timeoutInSeconds ) ) + { + if ( _verbosity > 0 ) + { + printf( "\n\nEngine: quitting due to timeout...\n\n" ); + printf( "Final statistics:\n" ); + _statistics.print(); + } + + _exitCode = Engine::TIMEOUT; + _statistics.timeout(); + return false; + } + + if ( _quitRequested ) + { + if ( _verbosity > 0 ) + { + printf( "\n\nEngine: quitting due to external request...\n\n" ); + printf( "Final statistics:\n" ); + _statistics.print(); + } + + _exitCode = Engine::QUIT_REQUESTED; + return false; + } + + try + { + DEBUG( _tableau->verifyInvariants() ); + + mainLoopStatistics(); + if ( _verbosity > 1 && + _statistics.getLongAttribute( Statistics::NUM_MAIN_LOOP_ITERATIONS ) % + _statisticsPrintingFrequency == + 0 ) + _statistics.print(); + + if ( _lpSolverType == LPSolverType::NATIVE ) + { + checkOverallProgress(); + // Check whether progress has been made recently + + if ( performPrecisionRestorationIfNeeded() ) + continue; + + if ( _tableau->basisMatrixAvailable() ) + { + explicitBasisBoundTightening(); + _boundManager.propagateTightenings(); + applyAllValidConstraintCaseSplits(); + } + } + + // If true, we just entered a new subproblem + if ( splitJustPerformed ) + { + performBoundTighteningAfterCaseSplit(); + informLPSolverOfBounds(); + splitJustPerformed = false; + } + + // Perform any SmtCore-initiated case splits + if ( _smtCore.needToSplit() ) + { + _smtCore.performSplit(); + splitJustPerformed = true; + continue; + } + + if ( !_tableau->allBoundsValid() ) + { + // Some variable bounds are invalid, so the query is unsat + throw InfeasibleQueryException(); + } + + if ( allVarsWithinBounds() ) + { + // It's possible that a disjunction constraint is fixed and additional constraints + // are introduced, making the linear portion unsatisfied. So we need to make sure + // there are no valid case splits that we do not know of. + applyAllBoundTightenings(); + if ( applyAllValidConstraintCaseSplits() ) + continue; + + // The linear portion of the problem has been solved. 
+ // Check the status of the PL constraints + bool solutionFound = adjustAssignmentToSatisfyNonLinearConstraints(); + if ( solutionFound ) + { + if ( allNonlinearConstraintsHold() ) + { + mainLoopEnd = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( + Statistics::TIME_MAIN_LOOP_MICRO, + TimeUtils::timePassed( mainLoopStart, mainLoopEnd ) ); + if ( _verbosity > 0 ) + { + printf( "\nEngine::solve: sat assignment found\n" ); + _statistics.print(); + } + + // Allows checking proofs produced for UNSAT leaves of satisfiable query + // search tree + if ( _produceUNSATProofs ) + { + ASSERT( _UNSATCertificateCurrentPointer ); + ( **_UNSATCertificateCurrentPointer ).setSATSolutionFlag(); + } + _exitCode = Engine::SAT; + return true; + } + else if ( !hasBranchingCandidate() ) + { + mainLoopEnd = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( + Statistics::TIME_MAIN_LOOP_MICRO, + TimeUtils::timePassed( mainLoopStart, mainLoopEnd ) ); + if ( _verbosity > 0 ) + { + printf( "\nEngine::solve: at leaf node but solving inconclusive\n" ); + _statistics.print(); + } + _exitCode = Engine::UNKNOWN; + return false; + } + else + { + while ( !_smtCore.needToSplit() ) + _smtCore.reportRejectedPhasePatternProposal(); + continue; + } + } + else + { + continue; + } + } + + // We have out-of-bounds variables. + if ( _lpSolverType == LPSolverType::NATIVE ) + performSimplexStep(); + else + { + ENGINE_LOG( "Checking LP feasibility with Gurobi..." ); + DEBUG( { checkGurobiBoundConsistency(); } ); + ASSERT( _lpSolverType == LPSolverType::GUROBI ); + LinearExpression dontCare; + minimizeCostWithGurobi( dontCare ); + } + continue; + } + catch ( const MalformedBasisException & ) + { + _tableau->toggleOptimization( false ); + if ( !handleMalformedBasisException() ) + { + ASSERT( _lpSolverType == LPSolverType::NATIVE ); + _exitCode = Engine::ERROR; + exportQueryWithError( "Cannot restore tableau" ); + mainLoopEnd = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TIME_MAIN_LOOP_MICRO, + TimeUtils::timePassed( mainLoopStart, mainLoopEnd ) ); + return false; + } + } + catch ( const InfeasibleQueryException & ) + { + _tableau->toggleOptimization( false ); + // The current query is unsat, and we need to pop. + // If we're at level 0, the whole query is unsat. + if ( _produceUNSATProofs ) + explainSimplexFailure(); + + if ( !_smtCore.popSplit() ) + { + mainLoopEnd = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TIME_MAIN_LOOP_MICRO, + TimeUtils::timePassed( mainLoopStart, mainLoopEnd ) ); + if ( _verbosity > 0 ) + { + printf( "\nEngine::solve: unsat query\n" ); + _statistics.print(); + } + _exitCode = Engine::UNSAT; + return false; + } + else + { + splitJustPerformed = true; + } + } + catch ( const VariableOutOfBoundDuringOptimizationException & ) + { + _tableau->toggleOptimization( false ); + continue; + } + catch ( MarabouError &e ) + { + String message = Stringf( + "Caught a MarabouError. Code: %u. Message: %s ", e.getCode(), e.getUserMessage() ); + _exitCode = Engine::ERROR; + exportQueryWithError( message ); + mainLoopEnd = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TIME_MAIN_LOOP_MICRO, + TimeUtils::timePassed( mainLoopStart, mainLoopEnd ) ); + return false; + } + catch ( ... 
)
+    {
+        _exitCode = Engine::ERROR;
+        exportQueryWithError( "Unknown error" );
+        mainLoopEnd = TimeUtils::sampleMicro();
+        _statistics.incLongAttribute( Statistics::TIME_MAIN_LOOP_MICRO,
+                                      TimeUtils::timePassed( mainLoopStart, mainLoopEnd ) );
+        return false;
+    }
+    }
+}
+
+void Engine::mainLoopStatistics()
+{
+    struct timespec start = TimeUtils::sampleMicro();
+
+    unsigned activeConstraints = 0;
+    for ( const auto &constraint : _plConstraints )
+        if ( constraint->isActive() )
+            ++activeConstraints;
+
+    _statistics.setUnsignedAttribute( Statistics::NUM_ACTIVE_PL_CONSTRAINTS, activeConstraints );
+    _statistics.setUnsignedAttribute( Statistics::NUM_PL_VALID_SPLITS,
+                                      _numPlConstraintsDisabledByValidSplits );
+    _statistics.setUnsignedAttribute( Statistics::NUM_PL_SMT_ORIGINATED_SPLITS,
+                                      _plConstraints.size() - activeConstraints -
+                                          _numPlConstraintsDisabledByValidSplits );
+
+    _statistics.incLongAttribute( Statistics::NUM_MAIN_LOOP_ITERATIONS );
+
+    struct timespec end = TimeUtils::sampleMicro();
+    _statistics.incLongAttribute( Statistics::TOTAL_TIME_HANDLING_STATISTICS_MICRO,
+                                  TimeUtils::timePassed( start, end ) );
+}
+
+void Engine::performBoundTighteningAfterCaseSplit()
+{
+    // Tighten bounds of a first hidden layer with MILP solver
+    performMILPSolverBoundedTighteningForSingleLayer( 1 );
+    do
+    {
+        performSymbolicBoundTightening();
+    }
+    while ( applyAllValidConstraintCaseSplits() );
+
+    // Tighten bounds of an output layer with MILP solver
+    if ( _networkLevelReasoner ) // to avoid failing a system test.
+        performMILPSolverBoundedTighteningForSingleLayer(
+            _networkLevelReasoner->getLayerIndexToLayer().size() - 1 );
+}
+
+bool Engine::adjustAssignmentToSatisfyNonLinearConstraints()
+{
+    ENGINE_LOG( "Linear constraints satisfied. Now trying to satisfy non-linear"
+                " constraints..." );
+    collectViolatedPlConstraints();
+
+    // If all constraints are satisfied, we are possibly done
+    if ( allPlConstraintsHold() )
+    {
+        if ( _lpSolverType == LPSolverType::NATIVE &&
+             _tableau->getBasicAssignmentStatus() != ITableau::BASIC_ASSIGNMENT_JUST_COMPUTED )
+        {
+            if ( _verbosity > 0 )
+            {
+                printf( "Before declaring sat, recomputing...\n" );
+            }
+            // Make sure that the assignment is precise before declaring success
+            _tableau->computeAssignment();
+            // If we actually have a real satisfying assignment, the next main-loop
+            // iteration will rediscover it with the recomputed values.
+            return false;
+        }
+        else
+            return true;
+    }
+    else if ( !GlobalConfiguration::USE_DEEPSOI_LOCAL_SEARCH )
+    {
+        // We have violated piecewise-linear constraints.
+        performConstraintFixingStep();
+
+        // Finally, take this opportunity to tighten any bounds
+        // and perform any valid case splits.
+        tightenBoundsOnConstraintMatrix();
+        _boundManager.propagateTightenings();
+        // For debugging purposes
+        checkBoundCompliancyWithDebugSolution();
+
+        while ( applyAllValidConstraintCaseSplits() )
+            performSymbolicBoundTightening();
+        return false;
+    }
+    else
+    {
+        return performDeepSoILocalSearch();
+    }
+}
+
+bool Engine::performPrecisionRestorationIfNeeded()
+{
+    // If the basis has become malformed, we need to restore it
+    if ( basisRestorationNeeded() )
+    {
+        if ( _basisRestorationRequired == Engine::STRONG_RESTORATION_NEEDED )
+        {
+            performPrecisionRestoration( PrecisionRestorer::RESTORE_BASICS );
+            _basisRestorationPerformed = Engine::PERFORMED_STRONG_RESTORATION;
+        }
+        else
+        {
+            performPrecisionRestoration( PrecisionRestorer::DO_NOT_RESTORE_BASICS );
+            _basisRestorationPerformed = Engine::PERFORMED_WEAK_RESTORATION;
+        }
+
+        _numVisitedStatesAtPreviousRestoration =
+            _statistics.getUnsignedAttribute( Statistics::NUM_VISITED_TREE_STATES );
+        _basisRestorationRequired = Engine::RESTORATION_NOT_NEEDED;
+        return true;
+    }
+
+    // Restoration is not required
+    _basisRestorationPerformed = Engine::NO_RESTORATION_PERFORMED;
+
+    // Possible restoration due to precision degradation
+    if ( shouldCheckDegradation() && highDegradation() )
+    {
+        performPrecisionRestoration( PrecisionRestorer::RESTORE_BASICS );
+        return true;
+    }
+
+    return false;
+}
+
+bool Engine::handleMalformedBasisException()
+{
+    // Debug
+    printf( "MalformedBasisException caught!\n" );
+    //
+
+    if ( _basisRestorationPerformed == Engine::NO_RESTORATION_PERFORMED )
+    {
+        if ( _numVisitedStatesAtPreviousRestoration !=
+             _statistics.getUnsignedAttribute( Statistics::NUM_VISITED_TREE_STATES ) )
+        {
+            // We've tried a strong restoration before, and it didn't work. Do a weak restoration
+            _basisRestorationRequired = Engine::WEAK_RESTORATION_NEEDED;
+        }
+        else
+        {
+            _basisRestorationRequired = Engine::STRONG_RESTORATION_NEEDED;
+        }
+        return true;
+    }
+    else if ( _basisRestorationPerformed == Engine::PERFORMED_STRONG_RESTORATION )
+    {
+        _basisRestorationRequired = Engine::WEAK_RESTORATION_NEEDED;
+        return true;
+    }
+    else
+        return false;
+}
+
+void Engine::performConstraintFixingStep()
+{
+    // Statistics
+    _statistics.incLongAttribute( Statistics::NUM_CONSTRAINT_FIXING_STEPS );
+    struct timespec start = TimeUtils::sampleMicro();
+
+    // Select a violated constraint as the target
+    selectViolatedPlConstraint();
+
+    // Report the violated constraint to the SMT engine
+    reportPlViolation();
+
+    // Attempt to fix the constraint
+    fixViolatedPlConstraintIfPossible();
+
+    struct timespec end = TimeUtils::sampleMicro();
+    _statistics.incLongAttribute( Statistics::TIME_CONSTRAINT_FIXING_STEPS_MICRO,
+                                  TimeUtils::timePassed( start, end ) );
+}
+
+bool Engine::performSimplexStep()
+{
+    // Statistics
+    _statistics.incLongAttribute( Statistics::NUM_SIMPLEX_STEPS );
+    struct timespec start = TimeUtils::sampleMicro();
+
+    /*
+      In order to increase numerical stability, we attempt to pick a
+      "good" entering/leaving combination, by trying to avoid tiny pivot
+      values. We do this as follows:
+
+      1. Pick an entering variable according to the strategy in use.
+      2. Find the entailed leaving variable.
+      3. If the combination is bad, go back to (1) and find the
+         next-best entering variable.
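+
+      For example, if the best-rated entering candidate induces a pivot element of
+      about 1e-12, we record it, exclude that candidate, and retry; the search
+      stops early once a pivot exceeds the acceptable stability threshold, and
+      otherwise the largest pivot seen across the attempts is used.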
+ */ + + if ( _tableau->isOptimizing() ) + _costFunctionManager->computeGivenCostFunction( _heuristicCost._addends ); + if ( _costFunctionManager->costFunctionInvalid() ) + _costFunctionManager->computeCoreCostFunction(); + else + _costFunctionManager->adjustBasicCostAccuracy(); + + DEBUG( { + // Since we're performing a simplex step, there are out-of-bounds variables. + // Therefore, if the cost function is fresh, it should not be zero. + if ( _costFunctionManager->costFunctionJustComputed() ) + { + const double *costFunction = _costFunctionManager->getCostFunction(); + unsigned size = _tableau->getN() - _tableau->getM(); + bool found = false; + for ( unsigned i = 0; i < size; ++i ) + { + if ( !FloatUtils::isZero( costFunction[i] ) ) + { + found = true; + break; + } + } + + if ( !found ) + { + printf( "Error! Have OOB vars but cost function is zero.\n" + "Recomputing cost function. New one is:\n" ); + _costFunctionManager->computeCoreCostFunction(); + _costFunctionManager->dumpCostFunction(); + throw MarabouError( MarabouError::DEBUGGING_ERROR, + "Have OOB vars but cost function is zero" ); + } + } + } ); + + // Obtain all eligible entering variables + List enteringVariableCandidates; + _tableau->getEntryCandidates( enteringVariableCandidates ); + + unsigned bestLeaving = 0; + double bestChangeRatio = 0.0; + Set excludedEnteringVariables; + bool haveCandidate = false; + unsigned bestEntering = 0; + double bestPivotEntry = 0.0; + unsigned tries = GlobalConfiguration::MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS; + + while ( tries > 0 ) + { + --tries; + + // Attempt to pick the best entering variable from the available candidates + if ( !_activeEntryStrategy->select( + _tableau, enteringVariableCandidates, excludedEnteringVariables ) ) + { + // No additional candidates can be found. + break; + } + + // We have a candidate! + haveCandidate = true; + + // We don't want to re-consider this candidate in future + // iterations + excludedEnteringVariables.insert( _tableau->getEnteringVariableIndex() ); + + // Pick a leaving variable + _tableau->computeChangeColumn(); + _tableau->pickLeavingVariable(); + + // A fake pivot always wins + if ( _tableau->performingFakePivot() ) + { + bestEntering = _tableau->getEnteringVariableIndex(); + bestLeaving = _tableau->getLeavingVariableIndex(); + bestChangeRatio = _tableau->getChangeRatio(); + memcpy( _work, _tableau->getChangeColumn(), sizeof( double ) * _tableau->getM() ); + break; + } + + // Is the newly found pivot better than the stored one? + unsigned leavingIndex = _tableau->getLeavingVariableIndex(); + double pivotEntry = FloatUtils::abs( _tableau->getChangeColumn()[leavingIndex] ); + if ( pivotEntry > bestPivotEntry ) + { + bestEntering = _tableau->getEnteringVariableIndex(); + bestPivotEntry = pivotEntry; + bestLeaving = leavingIndex; + bestChangeRatio = _tableau->getChangeRatio(); + memcpy( _work, _tableau->getChangeColumn(), sizeof( double ) * _tableau->getM() ); + } + + // If the pivot is greater than the sought-after threshold, we + // are done. + if ( bestPivotEntry >= GlobalConfiguration::ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD ) + break; + else + _statistics.incLongAttribute( + Statistics::NUM_SIMPLEX_PIVOT_SELECTIONS_IGNORED_FOR_STABILITY ); + } + + // If we don't have any candidates, this simplex step has failed. + if ( !haveCandidate ) + { + if ( _tableau->getBasicAssignmentStatus() != ITableau::BASIC_ASSIGNMENT_JUST_COMPUTED ) + { + // This failure might have resulted from a corrupt basic assignment. 
+ _tableau->computeAssignment(); + struct timespec end = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TIME_SIMPLEX_STEPS_MICRO, + TimeUtils::timePassed( start, end ) ); + return false; + } + else if ( !_costFunctionManager->costFunctionJustComputed() ) + { + // This failure might have resulted from a corrupt cost function. + ASSERT( _costFunctionManager->getCostFunctionStatus() == + ICostFunctionManager::COST_FUNCTION_UPDATED ); + _costFunctionManager->invalidateCostFunction(); + struct timespec end = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TIME_SIMPLEX_STEPS_MICRO, + TimeUtils::timePassed( start, end ) ); + return false; + } + else + { + // Cost function is fresh --- failure is real. + struct timespec end = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TIME_SIMPLEX_STEPS_MICRO, + TimeUtils::timePassed( start, end ) ); + if ( _tableau->isOptimizing() ) + { + // The current solution is optimal. + return true; + } + else + throw InfeasibleQueryException(); + } + } + + // Set the best choice in the tableau + _tableau->setEnteringVariableIndex( bestEntering ); + _tableau->setLeavingVariableIndex( bestLeaving ); + _tableau->setChangeColumn( _work ); + _tableau->setChangeRatio( bestChangeRatio ); + + bool fakePivot = _tableau->performingFakePivot(); + + if ( !fakePivot && bestPivotEntry < GlobalConfiguration::ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD ) + { + /* + Despite our efforts, we are stuck with a small pivot. If basis factorization + isn't fresh, refresh it and terminate this step - perhaps in the next iteration + a better pivot will be found + */ + if ( !_tableau->basisMatrixAvailable() ) + { + _tableau->refreshBasisFactorization(); + return false; + } + + _statistics.incLongAttribute( Statistics::NUM_SIMPLEX_UNSTABLE_PIVOTS ); + } + + if ( !fakePivot ) + { + _tableau->computePivotRow(); + _rowBoundTightener->examinePivotRow(); + } + + // Perform the actual pivot + _activeEntryStrategy->prePivotHook( _tableau, fakePivot ); + _tableau->performPivot(); + _activeEntryStrategy->postPivotHook( _tableau, fakePivot ); + _boundManager.propagateTightenings(); + _costFunctionManager->invalidateCostFunction(); + + struct timespec end = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TIME_SIMPLEX_STEPS_MICRO, + TimeUtils::timePassed( start, end ) ); + return false; +} + +void Engine::fixViolatedPlConstraintIfPossible() +{ + List fixes; + + if ( GlobalConfiguration::USE_SMART_FIX ) + fixes = _plConstraintToFix->getSmartFixes( _tableau ); + else + fixes = _plConstraintToFix->getPossibleFixes(); + + // First, see if we can fix without pivoting. We are looking for a fix concerning a + // non-basic variable, that doesn't set that variable out-of-bounds. + for ( const auto &fix : fixes ) + { + if ( !_tableau->isBasic( fix._variable ) ) + { + if ( _tableau->checkValueWithinBounds( fix._variable, fix._value ) ) + { + _tableau->setNonBasicAssignment( fix._variable, fix._value, true ); + return; + } + } + } + + // No choice, have to pivot. Look for a fix concerning a basic variable, that + // doesn't set that variable out-of-bounds. If smart-fix is enabled and implemented, + // we should probably not reach this point. 
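+    // ( To apply such a fix, the basic variable is first pivoted out of the basis,
+    //   using the largest-magnitude coefficient in its tableau row for numerical
+    //   stability, and is then assigned like any non-basic variable. )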
+ bool found = false; + auto it = fixes.begin(); + while ( !found && it != fixes.end() ) + { + if ( _tableau->isBasic( it->_variable ) ) + { + if ( _tableau->checkValueWithinBounds( it->_variable, it->_value ) ) + { + found = true; + } + } + if ( !found ) + { + ++it; + } + } + + // If we couldn't find an eligible fix, give up + if ( !found ) + return; + + PiecewiseLinearConstraint::Fix fix = *it; + ASSERT( _tableau->isBasic( fix._variable ) ); + + TableauRow row( _tableau->getN() - _tableau->getM() ); + _tableau->getTableauRow( _tableau->variableToIndex( fix._variable ), &row ); + + // Pick the variable with the largest coefficient in this row for pivoting, + // to increase numerical stability. + unsigned bestCandidate = row._row[0]._var; + double bestValue = FloatUtils::abs( row._row[0]._coefficient ); + + unsigned n = _tableau->getN(); + unsigned m = _tableau->getM(); + for ( unsigned i = 1; i < n - m; ++i ) + { + double contenderValue = FloatUtils::abs( row._row[i]._coefficient ); + if ( FloatUtils::gt( contenderValue, bestValue ) ) + { + bestValue = contenderValue; + bestCandidate = row._row[i]._var; + } + } + + if ( FloatUtils::isZero( bestValue ) ) + { + // This can happen, e.g., if we have an equation x = 5, and is legal behavior. + return; + } + + // Switch between nonBasic and the variable we need to fix + _tableau->setEnteringVariableIndex( _tableau->variableToIndex( bestCandidate ) ); + _tableau->setLeavingVariableIndex( _tableau->variableToIndex( fix._variable ) ); + + // Make sure the change column and pivot row are up-to-date - strategies + // such as projected steepest edge need these for their internal updates. + _tableau->computeChangeColumn(); + _tableau->computePivotRow(); + + _activeEntryStrategy->prePivotHook( _tableau, false ); + _tableau->performDegeneratePivot(); + _activeEntryStrategy->postPivotHook( _tableau, false ); + + ASSERT( !_tableau->isBasic( fix._variable ) ); + _tableau->setNonBasicAssignment( fix._variable, fix._value, true ); +} + +bool Engine::processInputQuery( const IQuery &inputQuery ) +{ + return processInputQuery( inputQuery, GlobalConfiguration::PREPROCESS_INPUT_QUERY ); +} + +bool Engine::calculateBounds( const IQuery &inputQuery ) +{ + ENGINE_LOG( "calculateBounds starting\n" ); + struct timespec start = TimeUtils::sampleMicro(); + + try + { + invokePreprocessor( inputQuery, true ); + if ( _verbosity > 1 ) + printInputBounds( inputQuery ); + + initializeNetworkLevelReasoning(); + + performSymbolicBoundTightening( &( *_preprocessedQuery ) ); + performSimulation(); + performMILPSolverBoundedTightening( &( *_preprocessedQuery ) ); + performAdditionalBackwardAnalysisIfNeeded(); + + if ( _networkLevelReasoner && Options::get()->getBool( Options::DUMP_BOUNDS ) ) + _networkLevelReasoner->dumpBounds(); + + struct timespec end = TimeUtils::sampleMicro(); + _statistics.setLongAttribute( Statistics::CALCULATE_BOUNDS_TIME_MICRO, + TimeUtils::timePassed( start, end ) ); + if ( !_tableau->allBoundsValid() ) + { + // Some variable bounds are invalid, so the query is unsat + throw InfeasibleQueryException(); + } + } + catch ( const InfeasibleQueryException & ) + { + ENGINE_LOG( "calculateBounds done\n" ); + + struct timespec end = TimeUtils::sampleMicro(); + _statistics.setLongAttribute( Statistics::CALCULATE_BOUNDS_TIME_MICRO, + TimeUtils::timePassed( start, end ) ); + + _exitCode = Engine::UNSAT; + printf( "unsat\n" ); + + return false; + } + + ENGINE_LOG( "calculateBounds done\n" ); + + return true; +} + +void Engine::invokePreprocessor( const IQuery 
&inputQuery, bool preprocess )
+{
+ if ( _verbosity > 0 )
+ printf( "Engine::processInputQuery: Input query (before preprocessing): "
+ "%u equations, %u variables\n",
+ inputQuery.getNumberOfEquations(),
+ inputQuery.getNumberOfVariables() );
+
+ // If preprocessing is enabled, invoke the preprocessor
+ _preprocessingEnabled = preprocess;
+ if ( _preprocessingEnabled )
+ _preprocessedQuery = _preprocessor.preprocess(
+ inputQuery, GlobalConfiguration::PREPROCESSOR_ELIMINATE_VARIABLES );
+ else
+ {
+ _preprocessedQuery = std::unique_ptr<Query>( inputQuery.generateQuery() );
+ Preprocessor().informConstraintsOfInitialBounds( *_preprocessedQuery );
+ }
+
+ if ( _verbosity > 0 )
+ printf( "Engine::processInputQuery: Input query (after preprocessing): "
+ "%u equations, %u variables\n\n",
+ _preprocessedQuery->getNumberOfEquations(),
+ _preprocessedQuery->getNumberOfVariables() );
+
+ unsigned infiniteBounds = _preprocessedQuery->countInfiniteBounds();
+ if ( infiniteBounds != 0 )
+ {
+ _exitCode = Engine::ERROR;
+ throw MarabouError( MarabouError::UNBOUNDED_VARIABLES_NOT_YET_SUPPORTED,
+ Stringf( "Error! Have %u infinite bounds", infiniteBounds ).ascii() );
+ }
+}
+
+void Engine::printInputBounds( const IQuery &inputQuery ) const
+{
+ printf( "Input bounds:\n" );
+ for ( unsigned i = 0; i < inputQuery.getNumInputVariables(); ++i )
+ {
+ unsigned variable = inputQuery.inputVariableByIndex( i );
+ double lb, ub;
+ bool fixed = false;
+ if ( _preprocessingEnabled )
+ {
+ // Fixed variables are easy: return the value they've been fixed to.
+ if ( _preprocessor.variableIsFixed( variable ) )
+ {
+ fixed = true;
+ lb = _preprocessor.getFixedValue( variable );
+ ub = lb;
+ }
+ else
+ {
+ // Has the variable been merged into another?
+ while ( _preprocessor.variableIsMerged( variable ) )
+ variable = _preprocessor.getMergedIndex( variable );
+
+ // We know which variable to look for, but it may have been assigned
+ // a new index, due to variable elimination
+ variable = _preprocessor.getNewIndex( variable );
+
+ lb = _preprocessedQuery->getLowerBound( variable );
+ ub = _preprocessedQuery->getUpperBound( variable );
+ }
+ }
+ else
+ {
+ lb = inputQuery.getLowerBound( variable );
+ ub = inputQuery.getUpperBound( variable );
+ }
+
+ printf( "\tx%u: [%8.4lf, %8.4lf] %s\n", i, lb, ub, fixed ? "[FIXED]" : "" );
+ }
+ printf( "\n" );
+}
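+
+/*
+ The bound lookup above follows the preprocessor's variable-translation
+ chain: fixed -> merged -> re-indexed. A minimal sketch of the same idea,
+ as a hypothetical helper (not part of the engine):
+
+ double lookupLowerBound( unsigned v )
+ {
+ if ( _preprocessor.variableIsFixed( v ) )
+ return _preprocessor.getFixedValue( v ); // fixed: the value is the bound
+ while ( _preprocessor.variableIsMerged( v ) )
+ v = _preprocessor.getMergedIndex( v ); // follow the merge chain
+ v = _preprocessor.getNewIndex( v ); // account for elimination
+ return _preprocessedQuery->getLowerBound( v );
+ }
+*/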
"[FIXED]" : "" ); + } + printf( "\n" ); +} + +void Engine::storeEquationsInDegradationChecker() +{ + _degradationChecker.storeEquations( *_preprocessedQuery ); +} + +double *Engine::createConstraintMatrix() +{ + const List &equations( _preprocessedQuery->getEquations() ); + unsigned m = equations.size(); + unsigned n = _preprocessedQuery->getNumberOfVariables(); + + // Step 1: create a constraint matrix from the equations + double *constraintMatrix = new double[n * m]; + if ( !constraintMatrix ) + throw MarabouError( MarabouError::ALLOCATION_FAILED, "Engine::constraintMatrix" ); + std::fill_n( constraintMatrix, n * m, 0.0 ); + + unsigned equationIndex = 0; + for ( const auto &equation : equations ) + { + if ( equation._type != Equation::EQ ) + { + _exitCode = Engine::ERROR; + throw MarabouError( MarabouError::NON_EQUALITY_INPUT_EQUATION_DISCOVERED ); + } + + for ( const auto &addend : equation._addends ) + constraintMatrix[equationIndex * n + addend._variable] = addend._coefficient; + + ++equationIndex; + } + + return constraintMatrix; +} + +void Engine::removeRedundantEquations( const double *constraintMatrix ) +{ + const List &equations( _preprocessedQuery->getEquations() ); + unsigned m = equations.size(); + unsigned n = _preprocessedQuery->getNumberOfVariables(); + + // Step 1: analyze the matrix to identify redundant rows + AutoConstraintMatrixAnalyzer analyzer; + analyzer->analyze( constraintMatrix, m, n ); + + ENGINE_LOG( + Stringf( "Number of redundant rows: %u out of %u", analyzer->getRedundantRows().size(), m ) + .ascii() ); + + // Step 2: remove any equations corresponding to redundant rows + Set redundantRows = analyzer->getRedundantRows(); + + if ( !redundantRows.empty() ) + { + _preprocessedQuery->removeEquationsByIndex( redundantRows ); + m = equations.size(); + } +} + +void Engine::selectInitialVariablesForBasis( const double *constraintMatrix, + List &initialBasis, + List &basicRows ) +{ + /* + This method permutes rows and columns in the constraint matrix (prior + to the addition of auxiliary variables), in order to obtain a set of + column that constitue a lower triangular matrix. The variables + corresponding to the columns of this matrix join the initial basis. + + (It is possible that not enough variables are obtained this way, in which + case the initial basis will have to be augmented later). 
+ */ + + const List &equations( _preprocessedQuery->getEquations() ); + + unsigned m = equations.size(); + unsigned n = _preprocessedQuery->getNumberOfVariables(); + + // Trivial case, or if a trivial basis is requested + if ( ( m == 0 ) || ( n == 0 ) || GlobalConfiguration::ONLY_AUX_INITIAL_BASIS ) + { + for ( unsigned i = 0; i < m; ++i ) + basicRows.append( i ); + + return; + } + + unsigned *nnzInRow = new unsigned[m]; + unsigned *nnzInColumn = new unsigned[n]; + + std::fill_n( nnzInRow, m, 0 ); + std::fill_n( nnzInColumn, n, 0 ); + + unsigned *columnOrdering = new unsigned[n]; + unsigned *rowOrdering = new unsigned[m]; + + for ( unsigned i = 0; i < m; ++i ) + rowOrdering[i] = i; + + for ( unsigned i = 0; i < n; ++i ) + columnOrdering[i] = i; + + // Initialize the counters + for ( unsigned i = 0; i < m; ++i ) + { + for ( unsigned j = 0; j < n; ++j ) + { + if ( !FloatUtils::isZero( constraintMatrix[i * n + j] ) ) + { + ++nnzInRow[i]; + ++nnzInColumn[j]; + } + } + } + + DEBUG( { + for ( unsigned i = 0; i < m; ++i ) + { + ASSERT( nnzInRow[i] > 0 ); + } + } ); + + unsigned numExcluded = 0; + unsigned numTriangularRows = 0; + unsigned temp; + + while ( numExcluded + numTriangularRows < n ) + { + // Do we have a singleton row? + unsigned singletonRow = m; + for ( unsigned i = numTriangularRows; i < m; ++i ) + { + if ( nnzInRow[i] == 1 ) + { + singletonRow = i; + break; + } + } + + if ( singletonRow < m ) + { + // Have a singleton row! Swap it to the top and update counters + temp = rowOrdering[singletonRow]; + rowOrdering[singletonRow] = rowOrdering[numTriangularRows]; + rowOrdering[numTriangularRows] = temp; + + temp = nnzInRow[numTriangularRows]; + nnzInRow[numTriangularRows] = nnzInRow[singletonRow]; + nnzInRow[singletonRow] = temp; + + // Find the non-zero entry in the row and swap it to the diagonal + DEBUG( bool foundNonZero = false ); + for ( unsigned i = numTriangularRows; i < n - numExcluded; ++i ) + { + if ( !FloatUtils::isZero( constraintMatrix[rowOrdering[numTriangularRows] * n + + columnOrdering[i]] ) ) + { + temp = columnOrdering[i]; + columnOrdering[i] = columnOrdering[numTriangularRows]; + columnOrdering[numTriangularRows] = temp; + + temp = nnzInColumn[numTriangularRows]; + nnzInColumn[numTriangularRows] = nnzInColumn[i]; + nnzInColumn[i] = temp; + + DEBUG( foundNonZero = true ); + break; + } + } + + ASSERT( foundNonZero ); + + // Remove all entries under the diagonal entry from the row counters + for ( unsigned i = numTriangularRows + 1; i < m; ++i ) + { + if ( !FloatUtils::isZero( constraintMatrix[rowOrdering[i] * n + + columnOrdering[numTriangularRows]] ) ) + --nnzInRow[i]; + } + + ++numTriangularRows; + } + else + { + // No singleton rows. 
Exclude the densest column
+ unsigned maxDensity = nnzInColumn[numTriangularRows];
+ unsigned column = numTriangularRows;
+
+ for ( unsigned i = numTriangularRows; i < n - numExcluded; ++i )
+ {
+ if ( nnzInColumn[i] > maxDensity )
+ {
+ maxDensity = nnzInColumn[i];
+ column = i;
+ }
+ }
+
+ // Update the row counters to account for the excluded column
+ for ( unsigned i = numTriangularRows; i < m; ++i )
+ {
+ double element = constraintMatrix[rowOrdering[i] * n + columnOrdering[column]];
+ if ( !FloatUtils::isZero( element ) )
+ {
+ ASSERT( nnzInRow[i] > 1 );
+ --nnzInRow[i];
+ }
+ }
+
+ columnOrdering[column] = columnOrdering[n - 1 - numExcluded];
+ nnzInColumn[column] = nnzInColumn[n - 1 - numExcluded];
+ ++numExcluded;
+ }
+ }
+
+ // Final basis: diagonalized columns + non-diagonalized rows
+ List<unsigned> result;
+
+ for ( unsigned i = 0; i < numTriangularRows; ++i )
+ {
+ initialBasis.append( columnOrdering[i] );
+ }
+
+ for ( unsigned i = numTriangularRows; i < m; ++i )
+ {
+ basicRows.append( rowOrdering[i] );
+ }
+
+ // Cleanup
+ delete[] nnzInRow;
+ delete[] nnzInColumn;
+ delete[] columnOrdering;
+ delete[] rowOrdering;
+}
+
+void Engine::addAuxiliaryVariables()
+{
+ List<Equation> &equations( _preprocessedQuery->getEquations() );
+
+ unsigned m = equations.size();
+ unsigned originalN = _preprocessedQuery->getNumberOfVariables();
+ unsigned n = originalN + m;
+
+ _preprocessedQuery->setNumberOfVariables( n );
+
+ // Add auxiliary variables to the equations and set their bounds
+ unsigned count = 0;
+ for ( auto &eq : equations )
+ {
+ unsigned auxVar = originalN + count;
+ if ( _produceUNSATProofs )
+ _preprocessedQuery->_lastAddendToAux.insert( eq._addends.back()._variable, auxVar );
+ eq.addAddend( -1, auxVar );
+ _preprocessedQuery->setLowerBound( auxVar, eq._scalar );
+ _preprocessedQuery->setUpperBound( auxVar, eq._scalar );
+ eq.setScalar( 0 );
+
+ ++count;
+ }
+}
+
+void Engine::augmentInitialBasisIfNeeded( List<unsigned> &initialBasis,
+ const List<unsigned> &basicRows )
+{
+ unsigned m = _preprocessedQuery->getEquations().size();
+ unsigned n = _preprocessedQuery->getNumberOfVariables();
+ unsigned originalN = n - m;
+
+ if ( initialBasis.size() != m )
+ {
+ for ( const auto &basicRow : basicRows )
+ initialBasis.append( basicRow + originalN );
+ }
+}
+
+void Engine::initializeTableau( const double *constraintMatrix, const List<unsigned> &initialBasis )
+{
+ const List<Equation> &equations( _preprocessedQuery->getEquations() );
+ unsigned m = equations.size();
+ unsigned n = _preprocessedQuery->getNumberOfVariables();
+
+ _tableau->setDimensions( m, n );
+
+ adjustWorkMemorySize();
+
+ unsigned equationIndex = 0;
+ for ( const auto &equation : equations )
+ {
+ _tableau->setRightHandSide( equationIndex, equation._scalar );
+ ++equationIndex;
+ }
+
+ // Populate the constraint matrix
+ _tableau->setConstraintMatrix( constraintMatrix );
+
+ _tableau->registerToWatchAllVariables( _rowBoundTightener );
+ _tableau->registerResizeWatcher( _rowBoundTightener );
+
+ _rowBoundTightener->setDimensions();
+
+ initializeBoundsAndConstraintWatchersInTableau( n );
+
+ _tableau->initializeTableau( initialBasis );
+
+ _costFunctionManager->initialize();
+ _tableau->registerCostFunctionManager( _costFunctionManager );
+ _activeEntryStrategy->initialize( _tableau );
+}
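+
+/*
+ For concreteness: by the time initializeTableau() runs, addAuxiliaryVariables()
+ has already rewritten every equation
+
+ sum( a_i * x_i ) = b as sum( a_i * x_i ) - aux = 0, aux in [b, b],
+
+ so each right-hand side handed to the tableau is zero, and the original
+ scalar survives only as the tight bounds of the equation's auxiliary
+ variable ("aux" here stands for the fresh variable originalN + equationIndex).
+*/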
+
+void Engine::initializeBoundsAndConstraintWatchersInTableau( unsigned numberOfVariables )
+{
+ _plConstraints = _preprocessedQuery->getPiecewiseLinearConstraints();
+ for ( const auto &constraint : _plConstraints )
+ {
+ constraint->registerAsWatcher( _tableau );
+ constraint->setStatistics( &_statistics );
+
+ // If aux variables are in use, inform the constraint of the tableau
+ // auxiliary variable that was assigned to it
+ if ( _produceUNSATProofs )
+ for ( unsigned var : constraint->getNativeAuxVars() )
+ if ( _preprocessedQuery->_lastAddendToAux.exists( var ) )
+ constraint->addTableauAuxVar( _preprocessedQuery->_lastAddendToAux.at( var ),
+ var );
+ }
+
+ _nlConstraints = _preprocessedQuery->getNonlinearConstraints();
+ for ( const auto &constraint : _nlConstraints )
+ {
+ constraint->registerAsWatcher( _tableau );
+ constraint->setStatistics( &_statistics );
+ }
+
+ for ( unsigned i = 0; i < numberOfVariables; ++i )
+ {
+ _tableau->setLowerBound( i, _preprocessedQuery->getLowerBound( i ) );
+ _tableau->setUpperBound( i, _preprocessedQuery->getUpperBound( i ) );
+ }
+ _boundManager.storeLocalBounds();
+
+ _statistics.setUnsignedAttribute( Statistics::NUM_PL_CONSTRAINTS, _plConstraints.size() );
+}
+
+void Engine::initializeNetworkLevelReasoning()
+{
+ _networkLevelReasoner = _preprocessedQuery->getNetworkLevelReasoner();
+
+ if ( _networkLevelReasoner )
+ {
+ _networkLevelReasoner->computeSuccessorLayers();
+ _networkLevelReasoner->setTableau( _tableau );
+ if ( Options::get()->getBool( Options::DUMP_TOPOLOGY ) )
+ {
+ _networkLevelReasoner->dumpTopology( false );
+ std::cout << std::endl;
+ }
+ }
+}
+
+bool Engine::processInputQuery( const IQuery &inputQuery, bool preprocess )
+{
+ ENGINE_LOG( "processInputQuery starting\n" );
+ struct timespec start = TimeUtils::sampleMicro();
+
+ try
+ {
+ invokePreprocessor( inputQuery, preprocess );
+ if ( _verbosity > 1 )
+ printInputBounds( inputQuery );
+ initializeNetworkLevelReasoning();
+ if ( preprocess )
+ {
+ performSymbolicBoundTightening( &( *_preprocessedQuery ) );
+ performSimulation();
+ performMILPSolverBoundedTightening( &( *_preprocessedQuery ) );
+ performAdditionalBackwardAnalysisIfNeeded();
+ }
+
+ if ( GlobalConfiguration::PL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING )
+ for ( auto &plConstraint : _preprocessedQuery->getPiecewiseLinearConstraints() )
+ plConstraint->addAuxiliaryEquationsAfterPreprocessing( *_preprocessedQuery );
+
+ if ( GlobalConfiguration::NL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING )
+ for ( auto &nlConstraint : _preprocessedQuery->getNonlinearConstraints() )
+ nlConstraint->addAuxiliaryEquationsAfterPreprocessing( *_preprocessedQuery );
+
+ if ( _produceUNSATProofs )
+ {
+ for ( auto &plConstraint : _preprocessedQuery->getPiecewiseLinearConstraints() )
+ {
+ if ( !UNSATCertificateUtils::getSupportedActivations().exists(
+ plConstraint->getType() ) )
+ {
+ _produceUNSATProofs = false;
+ Options::get()->setBool( Options::PRODUCE_PROOFS, false );
+ String activationType =
+ plConstraint->serializeToString().tokenize( "," ).back();
+ printf(
+ "Turning off proof production since activation %s is not yet supported\n",
+ activationType.ascii() );
+ break;
+ }
+ }
+ }
+
+ if ( _lpSolverType == LPSolverType::NATIVE )
+ {
+ double *constraintMatrix = createConstraintMatrix();
+ removeRedundantEquations( constraintMatrix );
+
+ // The equations have changed, recreate the constraint matrix
+ delete[] constraintMatrix;
+ constraintMatrix = createConstraintMatrix();
+
+ List<unsigned> initialBasis;
+ List<unsigned> basicRows;
+ selectInitialVariablesForBasis( constraintMatrix, initialBasis, basicRows );
+ addAuxiliaryVariables();
+ augmentInitialBasisIfNeeded( initialBasis, basicRows );
+
+ storeEquationsInDegradationChecker();
+
+ // The equations have changed, recreate
the constraint matrix + delete[] constraintMatrix; + constraintMatrix = createConstraintMatrix(); + + unsigned n = _preprocessedQuery->getNumberOfVariables(); + _boundManager.initialize( n ); + + initializeTableau( constraintMatrix, initialBasis ); + _boundManager.initializeBoundExplainer( n, _tableau->getM() ); + delete[] constraintMatrix; + + if ( _produceUNSATProofs ) + { + _UNSATCertificate = new UnsatCertificateNode( NULL, PiecewiseLinearCaseSplit() ); + _UNSATCertificateCurrentPointer->set( _UNSATCertificate ); + _UNSATCertificate->setVisited(); + _groundBoundManager.initialize( n ); + + for ( unsigned i = 0; i < n; ++i ) + { + _groundBoundManager.setUpperBound( i, _preprocessedQuery->getUpperBound( i ) ); + _groundBoundManager.setLowerBound( i, _preprocessedQuery->getLowerBound( i ) ); + } + } + } + else + { + ASSERT( _lpSolverType == LPSolverType::GUROBI ); + + ASSERT( GlobalConfiguration::USE_DEEPSOI_LOCAL_SEARCH == true ); + + if ( _verbosity > 0 ) + printf( "Using Gurobi to solve LP...\n" ); + + unsigned n = _preprocessedQuery->getNumberOfVariables(); + unsigned m = _preprocessedQuery->getEquations().size(); + // Only use BoundManager to store the bounds. + _boundManager.initialize( n ); + _tableau->setDimensions( m, n ); + initializeBoundsAndConstraintWatchersInTableau( n ); + } + + for ( const auto &constraint : _plConstraints ) + constraint->registerTableau( _tableau ); + for ( const auto &constraint : _nlConstraints ) + constraint->registerTableau( _tableau ); + + if ( _networkLevelReasoner && Options::get()->getBool( Options::DUMP_BOUNDS ) ) + _networkLevelReasoner->dumpBounds(); + + if ( GlobalConfiguration::USE_DEEPSOI_LOCAL_SEARCH ) + { + _soiManager = std::unique_ptr( + new SumOfInfeasibilitiesManager( *_preprocessedQuery, *_tableau ) ); + _soiManager->setStatistics( &_statistics ); + } + + if ( GlobalConfiguration::WARM_START ) + warmStart(); + + decideBranchingHeuristics(); + + struct timespec end = TimeUtils::sampleMicro(); + _statistics.setLongAttribute( Statistics::PREPROCESSING_TIME_MICRO, + TimeUtils::timePassed( start, end ) ); + + if ( !_tableau->allBoundsValid() ) + { + // Some variable bounds are invalid, so the query is unsat + throw InfeasibleQueryException(); + } + } + catch ( const InfeasibleQueryException & ) + { + ENGINE_LOG( "processInputQuery done\n" ); + + struct timespec end = TimeUtils::sampleMicro(); + _statistics.setLongAttribute( Statistics::PREPROCESSING_TIME_MICRO, + TimeUtils::timePassed( start, end ) ); + + _exitCode = Engine::UNSAT; + return false; + } + + ENGINE_LOG( "processInputQuery done\n" ); + + DEBUG( { + // Initially, all constraints should be active + for ( const auto &plc : _plConstraints ) + { + ASSERT( plc->isActive() ); + } + } ); + + _smtCore.storeDebuggingSolution( _preprocessedQuery->_debuggingSolution ); + return true; +} + +void Engine::performMILPSolverBoundedTightening( Query *inputQuery ) +{ + if ( _networkLevelReasoner && Options::get()->gurobiEnabled() ) + { + // Obtain from and store bounds into inputquery if it is not null. + if ( inputQuery ) + _networkLevelReasoner->obtainCurrentBounds( *inputQuery ); + else + _networkLevelReasoner->obtainCurrentBounds(); + + // TODO: Remove this block after getting ready to support sigmoid with MILP Bound + // Tightening. 
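+ // Until that support lands, the check below conservatively rejects
+ // MILP-style tightening whenever nonlinear constraints are present.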
+ if ( ( _milpSolverBoundTighteningType == MILPSolverBoundTighteningType::MILP_ENCODING ||
+ _milpSolverBoundTighteningType ==
+ MILPSolverBoundTighteningType::MILP_ENCODING_INCREMENTAL ||
+ _milpSolverBoundTighteningType ==
+ MILPSolverBoundTighteningType::ITERATIVE_PROPAGATION ) &&
+ _preprocessedQuery->getNonlinearConstraints().size() > 0 )
+ throw MarabouError( MarabouError::FEATURE_NOT_YET_SUPPORTED,
+ "Marabou doesn't support sigmoid with MILP Bound Tightening" );
+
+ switch ( _milpSolverBoundTighteningType )
+ {
+ case MILPSolverBoundTighteningType::LP_RELAXATION:
+ case MILPSolverBoundTighteningType::LP_RELAXATION_INCREMENTAL:
+ case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE:
+ case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE:
+ case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP:
+ case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX:
+ _networkLevelReasoner->lpRelaxationPropagation();
+ break;
+ case MILPSolverBoundTighteningType::MILP_ENCODING:
+ case MILPSolverBoundTighteningType::MILP_ENCODING_INCREMENTAL:
+ _networkLevelReasoner->MILPPropagation();
+ break;
+ case MILPSolverBoundTighteningType::ITERATIVE_PROPAGATION:
+ _networkLevelReasoner->iterativePropagation();
+ break;
+ case MILPSolverBoundTighteningType::NONE:
+ return;
+ }
+ List<Tightening> tightenings;
+ _networkLevelReasoner->getConstraintTightenings( tightenings );
+
+ if ( inputQuery )
+ {
+ for ( const auto &tightening : tightenings )
+ {
+ if ( tightening._type == Tightening::LB &&
+ FloatUtils::gt( tightening._value,
+ inputQuery->getLowerBound( tightening._variable ) ) )
+ inputQuery->setLowerBound( tightening._variable, tightening._value );
+ if ( tightening._type == Tightening::UB &&
+ FloatUtils::lt( tightening._value,
+ inputQuery->getUpperBound( tightening._variable ) ) )
+ inputQuery->setUpperBound( tightening._variable, tightening._value );
+ }
+ }
+ else
+ {
+ for ( const auto &tightening : tightenings )
+ {
+ if ( tightening._type == Tightening::LB )
+ _tableau->tightenLowerBound( tightening._variable, tightening._value );
+
+ else if ( tightening._type == Tightening::UB )
+ _tableau->tightenUpperBound( tightening._variable, tightening._value );
+ }
+ }
+ }
+}
+
+void Engine::performAdditionalBackwardAnalysisIfNeeded()
+{
+ if ( _milpSolverBoundTighteningType == MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE ||
+ _milpSolverBoundTighteningType ==
+ MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE )
+ {
+ unsigned tightened = performSymbolicBoundTightening( &( *_preprocessedQuery ) );
+ if ( _verbosity > 0 )
+ printf( "Backward analysis tightened %u bounds\n", tightened );
+ while ( tightened &&
+ _milpSolverBoundTighteningType ==
+ MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE &&
+ GlobalConfiguration::MAX_ROUNDS_OF_BACKWARD_ANALYSIS )
+ {
+ performMILPSolverBoundedTightening( &( *_preprocessedQuery ) );
+ tightened = performSymbolicBoundTightening( &( *_preprocessedQuery ) );
+ if ( _verbosity > 0 )
+ printf( "Backward analysis tightened %u bounds\n", tightened );
+ }
+ }
+}
+
+void Engine::performMILPSolverBoundedTighteningForSingleLayer( unsigned targetIndex )
+{
+ if ( _produceUNSATProofs )
+ return;
+
+ if ( _networkLevelReasoner && _isGurobyEnabled && _performLpTighteningAfterSplit &&
+ _milpSolverBoundTighteningType != MILPSolverBoundTighteningType::NONE )
+ {
+ _networkLevelReasoner->obtainCurrentBounds();
+ _networkLevelReasoner->clearConstraintTightenings();
+
+ switch ( _milpSolverBoundTighteningType )
+ {
+ case
MILPSolverBoundTighteningType::LP_RELAXATION: + _networkLevelReasoner->LPTighteningForOneLayer( targetIndex ); + break; + case MILPSolverBoundTighteningType::LP_RELAXATION_INCREMENTAL: + return; + case MILPSolverBoundTighteningType::MILP_ENCODING: + _networkLevelReasoner->MILPTighteningForOneLayer( targetIndex ); + break; + case MILPSolverBoundTighteningType::MILP_ENCODING_INCREMENTAL: + return; + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX: + case MILPSolverBoundTighteningType::ITERATIVE_PROPAGATION: + case MILPSolverBoundTighteningType::NONE: + return; + } + List tightenings; + _networkLevelReasoner->getConstraintTightenings( tightenings ); + + for ( const auto &tightening : tightenings ) + { + if ( tightening._type == Tightening::LB ) + _tableau->tightenLowerBound( tightening._variable, tightening._value ); + + else if ( tightening._type == Tightening::UB ) + _tableau->tightenUpperBound( tightening._variable, tightening._value ); + } + } +} + +void Engine::extractSolution( IQuery &inputQuery, Preprocessor *preprocessor ) +{ + Preprocessor *preprocessorInUse = nullptr; + if ( preprocessor != nullptr ) + preprocessorInUse = preprocessor; + else if ( _preprocessingEnabled ) + preprocessorInUse = &_preprocessor; + + for ( unsigned i = 0; i < inputQuery.getNumberOfVariables(); ++i ) + { + if ( preprocessorInUse ) + { + // Symbolically fixed variables are skipped. They will be re-constructed in the end. + if ( preprocessorInUse->variableIsUnusedAndSymbolicallyFixed( i ) ) + continue; + + // Has the variable been merged into another? + unsigned variable = i; + while ( preprocessorInUse->variableIsMerged( variable ) ) + variable = preprocessorInUse->getMergedIndex( variable ); + + // Fixed variables are easy: return the value they've been fixed to. 
+ if ( preprocessorInUse->variableIsFixed( variable ) ) + { + inputQuery.setSolutionValue( i, preprocessorInUse->getFixedValue( variable ) ); + continue; + } + + // We know which variable to look for, but it may have been assigned + // a new index, due to variable elimination + variable = preprocessorInUse->getNewIndex( variable ); + + // Finally, set the assigned value + inputQuery.setSolutionValue( i, _tableau->getValue( variable ) ); + } + else + { + inputQuery.setSolutionValue( i, _tableau->getValue( i ) ); + } + } + + if ( preprocessorInUse ) + { + /* + Recover the assignment of eliminated neurons (e.g., due to merging of WS layers) + */ + preprocessorInUse->setSolutionValuesOfEliminatedNeurons( inputQuery ); + } +} + +bool Engine::allVarsWithinBounds() const +{ + if ( _lpSolverType == LPSolverType::GUROBI ) + { + ASSERT( _gurobi ); + return _gurobi->haveFeasibleSolution(); + } + else + return !_tableau->existsBasicOutOfBounds(); +} + +void Engine::collectViolatedPlConstraints() +{ + _violatedPlConstraints.clear(); + for ( const auto &constraint : _plConstraints ) + { + if ( constraint->isActive() && !constraint->satisfied() ) + _violatedPlConstraints.append( constraint ); + } +} + +bool Engine::allPlConstraintsHold() +{ + return _violatedPlConstraints.empty(); +} + +bool Engine::allNonlinearConstraintsHold() +{ + for ( const auto &constraint : _nlConstraints ) + { + if ( !constraint->satisfied() ) + return false; + } + return true; +} + +bool Engine::hasBranchingCandidate() +{ + for ( const auto &constraint : _plConstraints ) + { + if ( constraint->isActive() && !constraint->phaseFixed() ) + return true; + } + return false; +} + +void Engine::selectViolatedPlConstraint() +{ + ASSERT( !_violatedPlConstraints.empty() ); + + _plConstraintToFix = _smtCore.chooseViolatedConstraintForFixing( _violatedPlConstraints ); + + ASSERT( _plConstraintToFix ); +} + +void Engine::reportPlViolation() +{ + _smtCore.reportViolatedConstraint( _plConstraintToFix ); +} + +void Engine::storeState( EngineState &state, TableauStateStorageLevel level ) const +{ + _tableau->storeState( state._tableauState, level ); + state._tableauStateStorageLevel = level; + + for ( const auto &constraint : _plConstraints ) + state._plConstraintToState[constraint] = constraint->duplicateConstraint(); + + state._numPlConstraintsDisabledByValidSplits = _numPlConstraintsDisabledByValidSplits; +} + +void Engine::restoreState( const EngineState &state ) +{ + ENGINE_LOG( "Restore state starting" ); + + if ( state._tableauStateStorageLevel == TableauStateStorageLevel::STORE_NONE ) + throw MarabouError( MarabouError::RESTORING_ENGINE_FROM_INVALID_STATE ); + + ENGINE_LOG( "\tRestoring tableau state" ); + _tableau->restoreState( state._tableauState, state._tableauStateStorageLevel ); + + ENGINE_LOG( "\tRestoring constraint states" ); + for ( auto &constraint : _plConstraints ) + { + if ( !state._plConstraintToState.exists( constraint ) ) + throw MarabouError( MarabouError::MISSING_PL_CONSTRAINT_STATE ); + + constraint->restoreState( state._plConstraintToState[constraint] ); + } + + _numPlConstraintsDisabledByValidSplits = state._numPlConstraintsDisabledByValidSplits; + + if ( _lpSolverType == LPSolverType::NATIVE ) + { + // Make sure the data structures are initialized to the correct size + _rowBoundTightener->setDimensions(); + adjustWorkMemorySize(); + _activeEntryStrategy->resizeHook( _tableau ); + _costFunctionManager->initialize(); + } + + // Reset the violation counts in the SMT core + _smtCore.resetSplitConditions(); +} + +void 
Engine::setNumPlConstraintsDisabledByValidSplits( unsigned numConstraints )
+{
+ _numPlConstraintsDisabledByValidSplits = numConstraints;
+}
+
+bool Engine::attemptToMergeVariables( unsigned x1, unsigned x2 )
+{
+ /*
+ First, we need to ensure that the variables are both non-basic.
+ */
+
+ unsigned n = _tableau->getN();
+ unsigned m = _tableau->getM();
+
+ if ( _tableau->isBasic( x1 ) )
+ {
+ TableauRow x1Row( n - m );
+ _tableau->getTableauRow( _tableau->variableToIndex( x1 ), &x1Row );
+
+ bool found = false;
+ double bestCoefficient = 0.0;
+ unsigned nonBasic = 0;
+ for ( unsigned i = 0; i < n - m; ++i )
+ {
+ if ( x1Row._row[i]._var != x2 )
+ {
+ double contender = FloatUtils::abs( x1Row._row[i]._coefficient );
+ if ( FloatUtils::gt( contender, bestCoefficient ) )
+ {
+ found = true;
+ nonBasic = x1Row._row[i]._var;
+ bestCoefficient = contender;
+ }
+ }
+ }
+
+ if ( !found )
+ return false;
+
+ _tableau->setEnteringVariableIndex( _tableau->variableToIndex( nonBasic ) );
+ _tableau->setLeavingVariableIndex( _tableau->variableToIndex( x1 ) );
+
+ // Make sure the change column and pivot row are up-to-date - strategies
+ // such as projected steepest edge need these for their internal updates.
+ _tableau->computeChangeColumn();
+ _tableau->computePivotRow();
+
+ _activeEntryStrategy->prePivotHook( _tableau, false );
+ _tableau->performDegeneratePivot();
+ _activeEntryStrategy->postPivotHook( _tableau, false );
+ }
+
+ if ( _tableau->isBasic( x2 ) )
+ {
+ TableauRow x2Row( n - m );
+ _tableau->getTableauRow( _tableau->variableToIndex( x2 ), &x2Row );
+
+ bool found = false;
+ double bestCoefficient = 0.0;
+ unsigned nonBasic = 0;
+ for ( unsigned i = 0; i < n - m; ++i )
+ {
+ if ( x2Row._row[i]._var != x1 )
+ {
+ double contender = FloatUtils::abs( x2Row._row[i]._coefficient );
+ if ( FloatUtils::gt( contender, bestCoefficient ) )
+ {
+ found = true;
+ nonBasic = x2Row._row[i]._var;
+ bestCoefficient = contender;
+ }
+ }
+ }
+
+ if ( !found )
+ return false;
+
+ _tableau->setEnteringVariableIndex( _tableau->variableToIndex( nonBasic ) );
+ _tableau->setLeavingVariableIndex( _tableau->variableToIndex( x2 ) );
+
+ // Make sure the change column and pivot row are up-to-date - strategies
+ // such as projected steepest edge need these for their internal updates.
+ _tableau->computeChangeColumn();
+ _tableau->computePivotRow();
+
+ _activeEntryStrategy->prePivotHook( _tableau, false );
+ _tableau->performDegeneratePivot();
+ _activeEntryStrategy->postPivotHook( _tableau, false );
+ }
+
+ // Both variables are now non-basic, so we can merge their columns
+ _tableau->mergeColumns( x1, x2 );
+ DEBUG( _tableau->verifyInvariants() );
+
+ // Reset the entry strategy
+ _activeEntryStrategy->initialize( _tableau );
+
+ return true;
+}
+
+void Engine::applySplit( const PiecewiseLinearCaseSplit &split )
+{
+ ENGINE_LOG( "" );
+ ENGINE_LOG( "Applying a split. " );
+
+ DEBUG( _tableau->verifyInvariants() );
+
+ List<Tightening> bounds = split.getBoundTightenings();
+ List<Equation> equations = split.getEquations();
+
+ // We assume that case splits only apply new bounds but do not apply
+ // new equations. Splits can always be formulated this way.
+ if ( _lpSolverType != LPSolverType::NATIVE && equations.size() > 0 )
+ throw MarabouError( MarabouError::FEATURE_NOT_YET_SUPPORTED,
+ "Can only update bounds when using the non-native "
+ "simplex engine!" );
+
+ for ( auto &equation : equations )
+ {
+ /*
+ First, adjust the equation if any variables have been merged.
+ E.g., if the equation is x1 + x2 + x3 = 0, and x1 and x2 have been + merged, the equation becomes 2x1 + x3 = 0 + */ + for ( auto &addend : equation._addends ) + addend._variable = _tableau->getVariableAfterMerging( addend._variable ); + + List::iterator addend; + List::iterator otherAddend; + + addend = equation._addends.begin(); + while ( addend != equation._addends.end() ) + { + otherAddend = addend; + ++otherAddend; + + while ( otherAddend != equation._addends.end() ) + { + if ( otherAddend->_variable == addend->_variable ) + { + addend->_coefficient += otherAddend->_coefficient; + otherAddend = equation._addends.erase( otherAddend ); + } + else + ++otherAddend; + } + + if ( FloatUtils::isZero( addend->_coefficient ) ) + addend = equation._addends.erase( addend ); + else + ++addend; + } + + /* + In the general case, we just add the new equation to the tableau. + However, we also support a very common case: equations of the form + x1 = x2, which are common, e.g., with ReLUs. For these equations we + may be able to merge two columns of the tableau. + */ + unsigned x1, x2; + bool canMergeColumns = + // Only if the flag is on + GlobalConfiguration::USE_COLUMN_MERGING_EQUATIONS && + // Only if the equation has the correct form + equation.isVariableMergingEquation( x1, x2 ) && + // And only if the variables are not out of bounds + ( !_tableau->isBasic( x1 ) || + !_tableau->basicOutOfBounds( _tableau->variableToIndex( x1 ) ) ) && + ( !_tableau->isBasic( x2 ) || + !_tableau->basicOutOfBounds( _tableau->variableToIndex( x2 ) ) ); + + bool columnsSuccessfullyMerged = false; + if ( canMergeColumns ) + columnsSuccessfullyMerged = attemptToMergeVariables( x1, x2 ); + + if ( !columnsSuccessfullyMerged ) + { + // General case: add a new equation to the tableau + unsigned auxVariable = _tableau->addEquation( equation ); + _activeEntryStrategy->resizeHook( _tableau ); + + switch ( equation._type ) + { + case Equation::GE: + bounds.append( Tightening( auxVariable, 0.0, Tightening::UB ) ); + break; + + case Equation::LE: + bounds.append( Tightening( auxVariable, 0.0, Tightening::LB ) ); + break; + + case Equation::EQ: + bounds.append( Tightening( auxVariable, 0.0, Tightening::LB ) ); + bounds.append( Tightening( auxVariable, 0.0, Tightening::UB ) ); + break; + + default: + ASSERT( false ); + break; + } + } + } + + if ( _lpSolverType == LPSolverType::NATIVE ) + { + adjustWorkMemorySize(); + } + + for ( auto &bound : bounds ) + { + unsigned variable = _tableau->getVariableAfterMerging( bound._variable ); + + if ( bound._type == Tightening::LB ) + { + ENGINE_LOG( + Stringf( "x%u: lower bound set to %.3lf", variable, bound._value ).ascii() ); + if ( _produceUNSATProofs && + FloatUtils::gt( bound._value, _boundManager.getLowerBound( bound._variable ) ) ) + { + _boundManager.resetExplanation( variable, Tightening::LB ); + updateGroundLowerBound( variable, bound._value ); + _boundManager.tightenLowerBound( variable, bound._value ); + } + else if ( !_produceUNSATProofs ) + _boundManager.tightenLowerBound( variable, bound._value ); + } + else + { + ENGINE_LOG( + Stringf( "x%u: upper bound set to %.3lf", variable, bound._value ).ascii() ); + if ( _produceUNSATProofs && + FloatUtils::lt( bound._value, _boundManager.getUpperBound( bound._variable ) ) ) + { + _boundManager.resetExplanation( variable, Tightening::UB ); + updateGroundUpperBound( variable, bound._value ); + _boundManager.tightenUpperBound( variable, bound._value ); + } + else if ( !_produceUNSATProofs ) + _boundManager.tightenUpperBound( variable, 
bound._value ); + } + } + + if ( _produceUNSATProofs && _UNSATCertificateCurrentPointer ) + ( **_UNSATCertificateCurrentPointer ).setVisited(); + + DEBUG( _tableau->verifyInvariants() ); + ENGINE_LOG( "Done with split\n" ); +} + +void Engine::applyBoundTightenings() +{ + List tightenings; + _boundManager.getTightenings( tightenings ); + + for ( const auto &tightening : tightenings ) + { + if ( tightening._type == Tightening::LB ) + _tableau->tightenLowerBound( tightening._variable, tightening._value ); + else + _tableau->tightenUpperBound( tightening._variable, tightening._value ); + } +} + +void Engine::applyAllRowTightenings() +{ + applyBoundTightenings(); +} + +void Engine::applyAllConstraintTightenings() +{ + applyBoundTightenings(); +} + +void Engine::applyAllBoundTightenings() +{ + struct timespec start = TimeUtils::sampleMicro(); + + if ( _lpSolverType == LPSolverType::NATIVE ) + applyAllRowTightenings(); + applyAllConstraintTightenings(); + + struct timespec end = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TOTAL_TIME_APPLYING_STORED_TIGHTENINGS_MICRO, + TimeUtils::timePassed( start, end ) ); +} + +bool Engine::applyAllValidConstraintCaseSplits() +{ + struct timespec start = TimeUtils::sampleMicro(); + + bool appliedSplit = false; + for ( auto &constraint : _plConstraints ) + if ( applyValidConstraintCaseSplit( constraint ) ) + appliedSplit = true; + + struct timespec end = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TOTAL_TIME_PERFORMING_VALID_CASE_SPLITS_MICRO, + TimeUtils::timePassed( start, end ) ); + + return appliedSplit; +} + +bool Engine::applyValidConstraintCaseSplit( PiecewiseLinearConstraint *constraint ) +{ + if ( constraint->isActive() && constraint->phaseFixed() ) + { + String constraintString; + constraint->dump( constraintString ); + ENGINE_LOG( Stringf( "A constraint has become valid. 
Dumping constraint: %s", + constraintString.ascii() ) + .ascii() ); + + constraint->setActiveConstraint( false ); + PiecewiseLinearCaseSplit validSplit = constraint->getValidCaseSplit(); + _smtCore.recordImpliedValidSplit( validSplit ); + applySplit( validSplit ); + + if ( _soiManager ) + _soiManager->removeCostComponentFromHeuristicCost( constraint ); + ++_numPlConstraintsDisabledByValidSplits; + + return true; + } + + return false; +} + +bool Engine::shouldCheckDegradation() +{ + return _statistics.getLongAttribute( Statistics::NUM_MAIN_LOOP_ITERATIONS ) % + GlobalConfiguration::DEGRADATION_CHECKING_FREQUENCY == + 0; +} + +bool Engine::highDegradation() +{ + struct timespec start = TimeUtils::sampleMicro(); + + double degradation = _degradationChecker.computeDegradation( *_tableau ); + _statistics.setDoubleAttribute( Statistics::CURRENT_DEGRADATION, degradation ); + if ( FloatUtils::gt( degradation, + _statistics.getDoubleAttribute( Statistics::MAX_DEGRADATION ) ) ) + _statistics.setDoubleAttribute( Statistics::MAX_DEGRADATION, degradation ); + + bool result = FloatUtils::gt( degradation, GlobalConfiguration::DEGRADATION_THRESHOLD ); + + struct timespec end = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TOTAL_TIME_DEGRADATION_CHECKING, + TimeUtils::timePassed( start, end ) ); + + // Debug + if ( result ) + printf( "High degradation found!\n" ); + // + + return result; +} + +void Engine::tightenBoundsOnConstraintMatrix() +{ + struct timespec start = TimeUtils::sampleMicro(); + + if ( _statistics.getLongAttribute( Statistics::NUM_MAIN_LOOP_ITERATIONS ) % + GlobalConfiguration::BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY == + 0 ) + { + _rowBoundTightener->examineConstraintMatrix( true ); + _statistics.incLongAttribute( Statistics::NUM_BOUND_TIGHTENINGS_ON_CONSTRAINT_MATRIX ); + } + + struct timespec end = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TOTAL_TIME_CONSTRAINT_MATRIX_BOUND_TIGHTENING_MICRO, + TimeUtils::timePassed( start, end ) ); +} + +void Engine::explicitBasisBoundTightening() +{ + struct timespec start = TimeUtils::sampleMicro(); + + bool saturation = GlobalConfiguration::EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION; + + _statistics.incLongAttribute( Statistics::NUM_BOUND_TIGHTENINGS_ON_EXPLICIT_BASIS ); + + switch ( GlobalConfiguration::EXPLICIT_BASIS_BOUND_TIGHTENING_TYPE ) + { + case GlobalConfiguration::COMPUTE_INVERTED_BASIS_MATRIX: + _rowBoundTightener->examineInvertedBasisMatrix( saturation ); + break; + + case GlobalConfiguration::USE_IMPLICIT_INVERTED_BASIS_MATRIX: + _rowBoundTightener->examineImplicitInvertedBasisMatrix( saturation ); + break; + + case GlobalConfiguration::DISABLE_EXPLICIT_BASIS_TIGHTENING: + break; + } + + struct timespec end = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TOTAL_TIME_EXPLICIT_BASIS_BOUND_TIGHTENING_MICRO, + TimeUtils::timePassed( start, end ) ); +} + +void Engine::performPrecisionRestoration( PrecisionRestorer::RestoreBasics restoreBasics ) +{ + struct timespec start = TimeUtils::sampleMicro(); + + // debug + double before = _degradationChecker.computeDegradation( *_tableau ); + // + + _precisionRestorer.restorePrecision( *this, *_tableau, _smtCore, restoreBasics ); + struct timespec end = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TOTAL_TIME_PRECISION_RESTORATION, + TimeUtils::timePassed( start, end ) ); + + _statistics.incUnsignedAttribute( Statistics::NUM_PRECISION_RESTORATIONS ); + + // debug + double after = 
_degradationChecker.computeDegradation( *_tableau );
+ if ( _verbosity > 0 )
+ printf( "Performing precision restoration. Degradation before: %.15lf. After: %.15lf\n",
+ before,
+ after );
+ //
+
+ if ( highDegradation() && ( restoreBasics == PrecisionRestorer::RESTORE_BASICS ) )
+ {
+ // First round, with basic restoration, still resulted in high degradation.
+ // Try again!
+ start = TimeUtils::sampleMicro();
+ _precisionRestorer.restorePrecision(
+ *this, *_tableau, _smtCore, PrecisionRestorer::DO_NOT_RESTORE_BASICS );
+ end = TimeUtils::sampleMicro();
+ _statistics.incLongAttribute( Statistics::TOTAL_TIME_PRECISION_RESTORATION,
+ TimeUtils::timePassed( start, end ) );
+ _statistics.incUnsignedAttribute( Statistics::NUM_PRECISION_RESTORATIONS );
+
+ // debug
+ double afterSecond = _degradationChecker.computeDegradation( *_tableau );
+ if ( _verbosity > 0 )
+ printf(
+ "Performing 2nd precision restoration. Degradation before: %.15lf. After: %.15lf\n",
+ after,
+ afterSecond );
+
+ if ( highDegradation() )
+ throw MarabouError( MarabouError::RESTORATION_FAILED_TO_RESTORE_PRECISION );
+ }
+}
+
+void Engine::storeInitialEngineState()
+{
+ if ( !_initialStateStored )
+ {
+ _precisionRestorer.storeInitialEngineState( *this );
+ _initialStateStored = true;
+ }
+}
+
+bool Engine::basisRestorationNeeded() const
+{
+ return _basisRestorationRequired == Engine::STRONG_RESTORATION_NEEDED ||
+ _basisRestorationRequired == Engine::WEAK_RESTORATION_NEEDED;
+}
+
+const Statistics *Engine::getStatistics() const
+{
+ return &_statistics;
+}
+
+Query *Engine::getQuery()
+{
+ return &( *_preprocessedQuery );
+}
+
+void Engine::checkBoundCompliancyWithDebugSolution()
+{
+ if ( _smtCore.checkSkewFromDebuggingSolution() )
+ {
+ // The stack is compliant, we should not have learned any non-compliant bounds
+ for ( const auto &var : _preprocessedQuery->_debuggingSolution )
+ {
+ // printf( "Looking at var %u\n", var.first );
+
+ if ( FloatUtils::gt( _tableau->getLowerBound( var.first ), var.second, 1e-5 ) )
+ {
+ printf( "Error! The stack is compliant, but learned a non-compliant bound: "
+ "Solution for x%u is %.15lf, but learned lower bound %.15lf\n",
+ var.first,
+ var.second,
+ _tableau->getLowerBound( var.first ) );
+
+ throw MarabouError( MarabouError::DEBUGGING_ERROR );
+ }
+
+ if ( FloatUtils::lt( _tableau->getUpperBound( var.first ), var.second, 1e-5 ) )
+ {
+ printf( "Error! The stack is compliant, but learned a non-compliant bound: "
+ "Solution for x%u is %.15lf, but learned upper bound %.15lf\n",
+ var.first,
+ var.second,
+ _tableau->getUpperBound( var.first ) );
+
+ throw MarabouError( MarabouError::DEBUGGING_ERROR );
+ }
+ }
+ }
+}
+
+void Engine::quitSignal()
+{
+ _quitRequested = true;
+}
+
+Engine::ExitCode Engine::getExitCode() const
+{
+ return _exitCode;
+}
+
+std::atomic_bool *Engine::getQuitRequested()
+{
+ return &_quitRequested;
+}
+
+List<unsigned> Engine::getInputVariables() const
+{
+ return _preprocessedQuery->getInputVariables();
+}
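+
+/*
+ The simulation below draws _simulationSize input points uniformly at
+ random from the current input box and propagates them through the network
+ via the NLR. A minimal sketch of the sampling (names simplified):
+
+ std::mt19937 rng( GlobalConfiguration::SIMULATION_RANDOM_SEED );
+ std::uniform_real_distribution<double> dist( lb_i, ub_i );
+ double sample = dist( rng ); // one draw for input neuron i
+
+ Samples are stored as simulations[inputNeuron][sampleIndex].
+*/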
+
+void Engine::performSimulation()
+{
+ if ( _simulationSize == 0 || !_networkLevelReasoner ||
+ _milpSolverBoundTighteningType == MILPSolverBoundTighteningType::NONE ||
+ _produceUNSATProofs )
+ {
+ ENGINE_LOG( Stringf( "Skip simulation..." ).ascii() );
+ return;
+ }
+
+ // The outer vector is indexed by neuron, the inner vector by simulation point
+ Vector<Vector<double>> simulations;
+
+ std::mt19937 mt( GlobalConfiguration::SIMULATION_RANDOM_SEED );
+
+ for ( unsigned i = 0; i < _networkLevelReasoner->getLayer( 0 )->getSize(); ++i )
+ {
+ std::uniform_real_distribution<double> distribution(
+ _networkLevelReasoner->getLayer( 0 )->getLb( i ),
+ _networkLevelReasoner->getLayer( 0 )->getUb( i ) );
+ Vector<double> simulationInput( _simulationSize );
+
+ for ( unsigned j = 0; j < _simulationSize; ++j )
+ simulationInput[j] = distribution( mt );
+ simulations.append( simulationInput );
+ }
+ _networkLevelReasoner->simulate( &simulations );
+}
+
+unsigned Engine::performSymbolicBoundTightening( Query *inputQuery )
+{
+ if ( _symbolicBoundTighteningType == SymbolicBoundTighteningType::NONE ||
+ ( !_networkLevelReasoner ) || _produceUNSATProofs )
+ return 0;
+
+ struct timespec start = TimeUtils::sampleMicro();
+
+ unsigned numTightenedBounds = 0;
+
+ // Step 1: tell the NLR about the current bounds
+ if ( inputQuery )
+ {
+ // Obtain bounds from, and store them into, inputQuery if it is not null.
+ _networkLevelReasoner->obtainCurrentBounds( *inputQuery );
+ }
+ else
+ {
+ // Get bounds from the tableau.
+ _networkLevelReasoner->obtainCurrentBounds();
+ }
+
+ // Step 2: perform SBT
+ if ( _symbolicBoundTighteningType == SymbolicBoundTighteningType::SYMBOLIC_BOUND_TIGHTENING )
+ _networkLevelReasoner->symbolicBoundPropagation();
+ else if ( _symbolicBoundTighteningType == SymbolicBoundTighteningType::DEEP_POLY )
+ _networkLevelReasoner->deepPolyPropagation();
+
+ // Step 3: extract the bounds
+ List<Tightening> tightenings;
+ _networkLevelReasoner->getConstraintTightenings( tightenings );
+
+ if ( inputQuery )
+ {
+ for ( const auto &tightening : tightenings )
+ {
+ if ( tightening._type == Tightening::LB &&
+ FloatUtils::gt( tightening._value,
+ inputQuery->getLowerBound( tightening._variable ) ) )
+ {
+ inputQuery->setLowerBound( tightening._variable, tightening._value );
+ ++numTightenedBounds;
+ }
+
+ if ( tightening._type == Tightening::UB &&
+ FloatUtils::lt( tightening._value,
+ inputQuery->getUpperBound( tightening._variable ) ) )
+ {
+ inputQuery->setUpperBound( tightening._variable, tightening._value );
+ ++numTightenedBounds;
+ }
+ }
+ }
+ else
+ {
+ for ( const auto &tightening : tightenings )
+ {
+ if ( tightening._type == Tightening::LB &&
+ FloatUtils::gt( tightening._value,
+ _tableau->getLowerBound( tightening._variable ) ) )
+ {
+ _tableau->tightenLowerBound( tightening._variable, tightening._value );
+ ++numTightenedBounds;
+ }
+
+ if ( tightening._type == Tightening::UB &&
+ FloatUtils::lt( tightening._value,
+ _tableau->getUpperBound( tightening._variable ) ) )
+ {
+ _tableau->tightenUpperBound( tightening._variable, tightening._value );
+ ++numTightenedBounds;
+ }
+ }
+ }
+
+ struct timespec end = TimeUtils::sampleMicro();
+ _statistics.incLongAttribute( Statistics::TOTAL_TIME_PERFORMING_SYMBOLIC_BOUND_TIGHTENING,
+ TimeUtils::timePassed( start, end ) );
+ _statistics.incLongAttribute( Statistics::NUM_TIGHTENINGS_FROM_SYMBOLIC_BOUND_TIGHTENING,
+ numTightenedBounds );
+ return numTightenedBounds;
+}
+
+bool Engine::shouldExitDueToTimeout( double timeout ) const
+{
+ // A timeout value of 0 means no time limit
+ if ( timeout == 0 )
+ return false;
+
+ return static_cast<long double>( _statistics.getTotalTimeInMicro() ) / MICROSECONDS_TO_SECONDS >
+ timeout;
+}
+
+void Engine::preContextPushHook()
+{
+ struct timespec start = TimeUtils::sampleMicro();
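+
+ // Snapshot the current local bounds so that a matching context pop can
+ // restore them (see postContextPopHook below)
+ 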
_boundManager.storeLocalBounds(); + if ( _produceUNSATProofs ) + _groundBoundManager.storeLocalBounds(); + struct timespec end = TimeUtils::sampleMicro(); + + _statistics.incLongAttribute( Statistics::TIME_CONTEXT_PUSH_HOOK, + TimeUtils::timePassed( start, end ) ); +} + +void Engine::postContextPopHook() +{ + struct timespec start = TimeUtils::sampleMicro(); + + _boundManager.restoreLocalBounds(); + if ( _produceUNSATProofs ) + _groundBoundManager.restoreLocalBounds(); + _tableau->postContextPopHook(); + + struct timespec end = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TIME_CONTEXT_POP_HOOK, + TimeUtils::timePassed( start, end ) ); +} + +void Engine::reset() +{ + resetStatistics(); + _sncMode = false; + clearViolatedPLConstraints(); + resetSmtCore(); + resetBoundTighteners(); + resetExitCode(); +} + +void Engine::resetStatistics() +{ + Statistics statistics; + _statistics = statistics; + _smtCore.setStatistics( &_statistics ); + _tableau->setStatistics( &_statistics ); + _rowBoundTightener->setStatistics( &_statistics ); + _preprocessor.setStatistics( &_statistics ); + _activeEntryStrategy->setStatistics( &_statistics ); + + _statistics.stampStartingTime(); +} + +void Engine::clearViolatedPLConstraints() +{ + _violatedPlConstraints.clear(); + _plConstraintToFix = NULL; +} + +void Engine::resetSmtCore() +{ + _smtCore.reset(); + _smtCore.initializeScoreTrackerIfNeeded( _plConstraints ); +} + +void Engine::resetExitCode() +{ + _exitCode = Engine::NOT_DONE; +} + +void Engine::resetBoundTighteners() +{ +} + +void Engine::warmStart() +{ + // An NLR is required for a warm start + if ( !_networkLevelReasoner ) + return; + + // First, choose an arbitrary assignment for the input variables + unsigned numInputVariables = _preprocessedQuery->getNumInputVariables(); + unsigned numOutputVariables = _preprocessedQuery->getNumOutputVariables(); + + if ( numInputVariables == 0 ) + { + // Trivial case: all inputs are fixed, nothing to evaluate + return; + } + + double *inputAssignment = new double[numInputVariables]; + double *outputAssignment = new double[numOutputVariables]; + + for ( unsigned i = 0; i < numInputVariables; ++i ) + { + unsigned variable = _preprocessedQuery->inputVariableByIndex( i ); + inputAssignment[i] = _tableau->getLowerBound( variable ); + } + + // Evaluate the network for this assignment + _networkLevelReasoner->evaluate( inputAssignment, outputAssignment ); + + // Try to update as many variables as possible to match their assignment + for ( unsigned i = 0; i < _networkLevelReasoner->getNumberOfLayers(); ++i ) + { + const NLR::Layer *layer = _networkLevelReasoner->getLayer( i ); + unsigned layerSize = layer->getSize(); + const double *assignment = layer->getAssignment(); + + for ( unsigned j = 0; j < layerSize; ++j ) + { + if ( layer->neuronHasVariable( j ) ) + { + unsigned variable = layer->neuronToVariable( j ); + if ( !_tableau->isBasic( variable ) ) + _tableau->setNonBasicAssignment( variable, assignment[j], false ); + } + } + } + + // We did what we could for the non-basics; now let the tableau compute + // the basic assignment + _tableau->computeAssignment(); + + delete[] outputAssignment; + delete[] inputAssignment; +} + +void Engine::checkOverallProgress() +{ + // Get fresh statistics + unsigned numVisitedStates = + _statistics.getUnsignedAttribute( Statistics::NUM_VISITED_TREE_STATES ); + unsigned long long currentIteration = + _statistics.getLongAttribute( Statistics::NUM_MAIN_LOOP_ITERATIONS ); + + if ( numVisitedStates > _lastNumVisitedStates 
)
+ {
+ // Progress has been made
+ _lastNumVisitedStates = numVisitedStates;
+ _lastIterationWithProgress = currentIteration;
+ }
+ else
+ {
+ // No progress has been made. If it's been too long, request a restoration
+ if ( currentIteration >
+ _lastIterationWithProgress + GlobalConfiguration::MAX_ITERATIONS_WITHOUT_PROGRESS )
+ {
+ ENGINE_LOG(
+ "checkOverallProgress detected cycling. Requesting a precision restoration" );
+ _basisRestorationRequired = Engine::STRONG_RESTORATION_NEEDED;
+ _lastIterationWithProgress = currentIteration;
+ }
+ }
+}
+
+void Engine::updateDirections()
+{
+ if ( GlobalConfiguration::USE_POLARITY_BASED_DIRECTION_HEURISTICS )
+ for ( const auto &constraint : _plConstraints )
+ if ( constraint->supportPolarity() && constraint->isActive() &&
+ !constraint->phaseFixed() )
+ constraint->updateDirection();
+}
+
+void Engine::decideBranchingHeuristics()
+{
+ DivideStrategy divideStrategy = Options::get()->getDivideStrategy();
+ if ( divideStrategy == DivideStrategy::Auto )
+ {
+ if ( !_produceUNSATProofs && !_preprocessedQuery->getInputVariables().empty() &&
+ _preprocessedQuery->getInputVariables().size() <
+ GlobalConfiguration::INTERVAL_SPLITTING_THRESHOLD )
+ {
+ // NOTE: the benefit of input splitting is minimal with abstract interpretation
+ // disabled. Since proof production mode does not currently support abstract
+ // interpretation, we do not perform input splitting in that mode.
+ divideStrategy = DivideStrategy::LargestInterval;
+ if ( _verbosity >= 2 )
+ printf( "Branching heuristics set to LargestInterval\n" );
+ }
+ else
+ {
+ if ( GlobalConfiguration::USE_DEEPSOI_LOCAL_SEARCH )
+ {
+ divideStrategy = DivideStrategy::PseudoImpact;
+ if ( _verbosity >= 2 )
+ printf( "Branching heuristics set to PseudoImpact\n" );
+ }
+ else
+ {
+ divideStrategy = DivideStrategy::ReLUViolation;
+ if ( _verbosity >= 2 )
+ printf( "Branching heuristics set to ReLUViolation\n" );
+ }
+ }
+ }
+ ASSERT( divideStrategy != DivideStrategy::Auto );
+ _smtCore.setBranchingHeuristics( divideStrategy );
+ _smtCore.initializeScoreTrackerIfNeeded( _plConstraints );
+}
+
+PiecewiseLinearConstraint *Engine::pickSplitPLConstraintBasedOnPolarity()
+{
+ ENGINE_LOG( Stringf( "Using Polarity-based heuristics..." ).ascii() );
+
+ if ( !_networkLevelReasoner )
+ return NULL;
+
+ List<PiecewiseLinearConstraint *> constraints =
+ _networkLevelReasoner->getConstraintsInTopologicalOrder();
+
+ Map<double, PiecewiseLinearConstraint *> scoreToConstraint;
+ for ( auto &plConstraint : constraints )
+ {
+ if ( plConstraint->supportPolarity() && plConstraint->isActive() &&
+ !plConstraint->phaseFixed() )
+ {
+ plConstraint->updateScoreBasedOnPolarity();
+ scoreToConstraint[plConstraint->getScore()] = plConstraint;
+ if ( scoreToConstraint.size() >= GlobalConfiguration::POLARITY_CANDIDATES_THRESHOLD )
+ break;
+ }
+ }
+ if ( scoreToConstraint.size() > 0 )
+ {
+ ENGINE_LOG( Stringf( "Score of the picked ReLU: %f", ( *scoreToConstraint.begin() ).first )
+ .ascii() );
+ return ( *scoreToConstraint.begin() ).second;
+ }
+ else
+ return NULL;
+}
+
+PiecewiseLinearConstraint *Engine::pickSplitPLConstraintBasedOnTopology()
+{
+ // We return the first unfixed ReLU in topological order
+ ENGINE_LOG( Stringf( "Using EarliestReLU heuristics..." ).ascii() );
+
+ if ( !_networkLevelReasoner )
+ throw MarabouError( MarabouError::NETWORK_LEVEL_REASONER_NOT_AVAILABLE );
+
+ List<PiecewiseLinearConstraint *> constraints =
+ _networkLevelReasoner->getConstraintsInTopologicalOrder();
+
+ for ( auto &plConstraint : constraints )
+ {
+ if ( plConstraint->isActive() && !plConstraint->phaseFixed() )
+ return plConstraint;
+ }
+ return NULL;
+}
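+
+/*
+ For intuition, interval-based splitting (below) bisects the widest input
+ interval. A sketch of the resulting case split (illustrative values):
+
+ x3 in [0, 10] is the widest input interval
+ mid = ( 0 + 10 ) / 2 = 5
+ disjunct 1: x3 <= 5 (an upper-bound tightening)
+ disjunct 2: x3 >= 5 (a lower-bound tightening)
+
+ The two disjuncts are wrapped in a DisjunctionConstraint for branching.
+*/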
+
+PiecewiseLinearConstraint *Engine::pickSplitPLConstraintBasedOnIntervalWidth()
+{
+ // We split on the input variable with the widest interval
+ ENGINE_LOG( Stringf( "Using LargestInterval heuristics..." ).ascii() );
+
+ unsigned inputVariableWithLargestInterval = 0;
+ double largestIntervalSoFar = 0;
+ for ( const auto &variable : _preprocessedQuery->getInputVariables() )
+ {
+ double interval = _tableau->getUpperBound( variable ) - _tableau->getLowerBound( variable );
+ if ( interval > largestIntervalSoFar )
+ {
+ inputVariableWithLargestInterval = variable;
+ largestIntervalSoFar = interval;
+ }
+ }
+
+ if ( largestIntervalSoFar == 0 )
+ return NULL;
+ else
+ {
+ double mid = ( _tableau->getLowerBound( inputVariableWithLargestInterval ) +
+ _tableau->getUpperBound( inputVariableWithLargestInterval ) ) /
+ 2;
+ PiecewiseLinearCaseSplit s1;
+ s1.storeBoundTightening(
+ Tightening( inputVariableWithLargestInterval, mid, Tightening::UB ) );
+ PiecewiseLinearCaseSplit s2;
+ s2.storeBoundTightening(
+ Tightening( inputVariableWithLargestInterval, mid, Tightening::LB ) );
+
+ List<PiecewiseLinearCaseSplit> splits;
+ splits.append( s1 );
+ splits.append( s2 );
+ _disjunctionForSplitting =
+ std::unique_ptr<DisjunctionConstraint>( new DisjunctionConstraint( splits ) );
+ return _disjunctionForSplitting.get();
+ }
+}
+
+PiecewiseLinearConstraint *Engine::pickSplitPLConstraint( DivideStrategy strategy )
+{
+ ENGINE_LOG( Stringf( "Picking a split PLConstraint..." ).ascii() );
+
+ PiecewiseLinearConstraint *candidatePLConstraint = NULL;
+ if ( strategy == DivideStrategy::PseudoImpact )
+ {
+ if ( _smtCore.getStackDepth() > 3 )
+ candidatePLConstraint = _smtCore.getConstraintsWithHighestScore();
+ else if ( !_preprocessedQuery->getInputVariables().empty() &&
+ _preprocessedQuery->getInputVariables().size() <
+ GlobalConfiguration::INTERVAL_SPLITTING_THRESHOLD )
+ candidatePLConstraint = pickSplitPLConstraintBasedOnIntervalWidth();
+ else
+ {
+ candidatePLConstraint = pickSplitPLConstraintBasedOnPolarity();
+ if ( candidatePLConstraint == NULL )
+ candidatePLConstraint = _smtCore.getConstraintsWithHighestScore();
+ }
+ }
+ else if ( strategy == DivideStrategy::Polarity )
+ candidatePLConstraint = pickSplitPLConstraintBasedOnPolarity();
+ else if ( strategy == DivideStrategy::EarliestReLU )
+ candidatePLConstraint = pickSplitPLConstraintBasedOnTopology();
+ else if ( strategy == DivideStrategy::LargestInterval &&
+ ( ( _smtCore.getStackDepth() + 1 ) %
+ GlobalConfiguration::INTERVAL_SPLITTING_FREQUENCY !=
+ 0 ) )
+ {
+ // Conduct interval splitting periodically.
+ candidatePLConstraint = pickSplitPLConstraintBasedOnIntervalWidth();
+ }
+ ENGINE_LOG(
+ Stringf( ( candidatePLConstraint ? "Picked..."
+ : "Unable to pick using the current strategy..."
+
+PiecewiseLinearConstraint *Engine::pickSplitPLConstraintSnC( SnCDivideStrategy strategy )
+{
+    PiecewiseLinearConstraint *candidatePLConstraint = NULL;
+    if ( strategy == SnCDivideStrategy::Polarity )
+        candidatePLConstraint = pickSplitPLConstraintBasedOnPolarity();
+    else if ( strategy == SnCDivideStrategy::EarliestReLU )
+        candidatePLConstraint = pickSplitPLConstraintBasedOnTopology();
+
+    ENGINE_LOG( Stringf( "Done updating scores..." ).ascii() );
+    ENGINE_LOG(
+        Stringf( ( candidatePLConstraint ? "Picked..."
+                                         : "Unable to pick using the current strategy..." ) )
+            .ascii() );
+    return candidatePLConstraint;
+}
+
+bool Engine::restoreSmtState( SmtState &smtState )
+{
+    try
+    {
+        ASSERT( _smtCore.getStackDepth() == 0 );
+
+        // Step 1: apply all implied valid splits at the root level
+        for ( auto &validSplit : smtState._impliedValidSplitsAtRoot )
+        {
+            applySplit( validSplit );
+            _smtCore.recordImpliedValidSplit( validSplit );
+        }
+
+        tightenBoundsOnConstraintMatrix();
+        _boundManager.propagateTightenings();
+        // For debugging purposes
+        checkBoundCompliancyWithDebugSolution();
+        do
+            performSymbolicBoundTightening();
+        while ( applyAllValidConstraintCaseSplits() );
+
+        // Step 2: replay the stack
+        for ( auto &stackEntry : smtState._stack )
+        {
+            _smtCore.replaySmtStackEntry( stackEntry );
+            // Do all the bound propagation, and set ReLU constraints to inactive (at
+            // least the one corresponding to the _activeSplit applied above).
+            tightenBoundsOnConstraintMatrix();
+
+            // For debugging purposes
+            checkBoundCompliancyWithDebugSolution();
+            do
+                performSymbolicBoundTightening();
+            while ( applyAllValidConstraintCaseSplits() );
+        }
+        _boundManager.propagateTightenings();
+    }
+    catch ( const InfeasibleQueryException & )
+    {
+        // The current query is unsat, and we need to pop.
+        // If we're at level 0, the whole query is unsat.
+        if ( _produceUNSATProofs )
+            explainSimplexFailure();
+
+        if ( !_smtCore.popSplit() )
+        {
+            if ( _verbosity > 0 )
+            {
+                printf( "\nEngine::solve: UNSAT query\n" );
+                _statistics.print();
+            }
+            _exitCode = Engine::UNSAT;
+            for ( PiecewiseLinearConstraint *p : _plConstraints )
+                p->setActiveConstraint( true );
+            return false;
+        }
+    }
+    return true;
+}
+
+void Engine::storeSmtState( SmtState &smtState )
+{
+    _smtCore.storeSmtState( smtState );
+}
+
+bool Engine::solveWithMILPEncoding( double timeoutInSeconds )
+{
+    try
+    {
+        if ( _lpSolverType == LPSolverType::NATIVE && _tableau->basisMatrixAvailable() )
+        {
+            explicitBasisBoundTightening();
+            applyAllBoundTightenings();
+            applyAllValidConstraintCaseSplits();
+        }
+
+        while ( applyAllValidConstraintCaseSplits() )
+        {
+            performSymbolicBoundTightening();
+        }
+    }
+    catch ( const InfeasibleQueryException & )
+    {
+        _exitCode = Engine::UNSAT;
+        return false;
+    }
+
+    ENGINE_LOG( "Encoding the input query with Gurobi...\n" );
+    _gurobi = std::unique_ptr<GurobiWrapper>( new GurobiWrapper() );
+    _tableau->setGurobi( &( *_gurobi ) );
+    _milpEncoder = std::unique_ptr<MILPEncoder>( new MILPEncoder( *_tableau ) );
+    _milpEncoder->encodeQuery( *_gurobi, *_preprocessedQuery );
+    ENGINE_LOG( "Query encoded in Gurobi...\n" );
+
+    double timeoutForGurobi = ( timeoutInSeconds == 0 ? FloatUtils::infinity() : timeoutInSeconds );
+    ENGINE_LOG( Stringf( "Gurobi timeout set to %f\n", timeoutForGurobi ).ascii() );
+    _gurobi->setTimeLimit( timeoutForGurobi );
+    if ( !_sncMode )
+        _gurobi->setNumberOfThreads( Options::get()->getInt( Options::NUM_WORKERS ) );
+    _gurobi->setVerbosity( _verbosity > 0 );
+    _gurobi->solve();
+
+    if ( _gurobi->haveFeasibleSolution() )
+    {
+        if ( allNonlinearConstraintsHold() )
+        {
+            _exitCode = IEngine::SAT;
+            return true;
+        }
+        else
+        {
+            _exitCode = IEngine::UNKNOWN;
+            return false;
+        }
+    }
+    else if ( _gurobi->infeasible() )
+        _exitCode = IEngine::UNSAT;
+    else if ( _gurobi->timeout() )
+        _exitCode = IEngine::TIMEOUT;
+    else
+        throw NLRError( NLRError::UNEXPECTED_RETURN_STATUS_FROM_GUROBI );
+    return false;
+}
+
+bool Engine::preprocessingEnabled() const
+{
+    return _preprocessingEnabled;
+}
+
+Preprocessor *Engine::getPreprocessor()
+{
+    return &_preprocessor;
+}
+
+bool Engine::performDeepSoILocalSearch()
+{
+    ENGINE_LOG( "Performing local search..." );
+    struct timespec start = TimeUtils::sampleMicro();
+    ASSERT( allVarsWithinBounds() );
+
+    // All the linear constraints have been satisfied at this point.
+    // Update the cost function
+    _soiManager->initializePhasePattern();
+
+    LinearExpression initialPhasePattern = _soiManager->getCurrentSoIPhasePattern();
+
+    if ( initialPhasePattern.isZero() )
+    {
+        if ( hasBranchingCandidate() )
+            while ( !_smtCore.needToSplit() )
+                _smtCore.reportRejectedPhasePatternProposal();
+        return false;
+    }
+
+    minimizeHeuristicCost( initialPhasePattern );
+    ASSERT( allVarsWithinBounds() );
+    _soiManager->updateCurrentPhasePatternForSatisfiedPLConstraints();
+    // Always accept the first phase pattern.
+    _soiManager->acceptCurrentPhasePattern();
+    double costOfLastAcceptedPhasePattern =
+        computeHeuristicCost( _soiManager->getCurrentSoIPhasePattern() );
+
+    double costOfProposedPhasePattern = FloatUtils::infinity();
+    bool lastProposalAccepted = true;
+    while ( !_smtCore.needToSplit() )
+    {
+        struct timespec end = TimeUtils::sampleMicro();
+        _statistics.incLongAttribute( Statistics::TOTAL_TIME_LOCAL_SEARCH_MICRO,
+                                      TimeUtils::timePassed( start, end ) );
+        start = end;
+
+        if ( lastProposalAccepted )
+        {
+            /*
+              Check whether the optimal solution to the last accepted phase
+              pattern is a real solution. We only check this when the last
+              proposal was accepted, because a rejected phase pattern must have
+              resulted in an increase in the SoI cost.
+
+              HW: Another option is to only do this check when
+              costOfLastAcceptedPhasePattern is 0, but this might be too strict.
+              The overhead is low anyway.
+            */
+            collectViolatedPlConstraints();
+            if ( allPlConstraintsHold() )
+            {
+                if ( _lpSolverType == LPSolverType::NATIVE &&
+                     _tableau->getBasicAssignmentStatus() !=
+                         ITableau::BASIC_ASSIGNMENT_JUST_COMPUTED )
+                {
+                    if ( _verbosity > 0 )
+                    {
+                        printf( "Before declaring sat, recomputing...\n" );
+                    }
+                    // Make sure that the assignment is precise before declaring success
+                    _tableau->computeAssignment();
+                    // Return false so that the main loop can verify whether we
+                    // actually have a real satisfying assignment.
+                    return false;
+                }
+                else
+                {
+                    ENGINE_LOG( "Performing local search - done." );
+                    return true;
+                }
+            }
+            else if ( FloatUtils::isZero( costOfLastAcceptedPhasePattern ) )
+            {
+                // Corner case: the SoI is minimal but there are still some PL
+                // constraints (those not in the SoI) unsatisfied.
+                // In this case, we bump up the score of PLConstraints not in
+                // the SoI with the hope to branch on them early.
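+                // The bump amount is the constant GlobalConfiguration::
+                // SCORE_BUMP_FOR_PL_CONSTRAINTS_NOT_IN_SOI (5 by default),
+                // applied by the call below to every active, unfixed,
+                // unsatisfied constraint that does not participate in the SoI.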
+                bumpUpPseudoImpactOfPLConstraintsNotInSoI();
+                if ( hasBranchingCandidate() )
+                    while ( !_smtCore.needToSplit() )
+                        _smtCore.reportRejectedPhasePatternProposal();
+                return false;
+            }
+        }
+
+        // No satisfying assignment found for the last accepted phase pattern,
+        // propose an update to it.
+        _soiManager->proposePhasePatternUpdate();
+        minimizeHeuristicCost( _soiManager->getCurrentSoIPhasePattern() );
+        _soiManager->updateCurrentPhasePatternForSatisfiedPLConstraints();
+        costOfProposedPhasePattern =
+            computeHeuristicCost( _soiManager->getCurrentSoIPhasePattern() );
+
+        // The proposal has the "local" effect of changing the cost terms of
+        // some PLConstraints in the phase pattern. Use this information to
+        // influence the branching decision.
+        updatePseudoImpactWithSoICosts( costOfLastAcceptedPhasePattern,
+                                        costOfProposedPhasePattern );
+
+        // Decide whether to accept the last proposal.
+        if ( _soiManager->decideToAcceptCurrentProposal( costOfLastAcceptedPhasePattern,
+                                                         costOfProposedPhasePattern ) )
+        {
+            _soiManager->acceptCurrentPhasePattern();
+            costOfLastAcceptedPhasePattern = costOfProposedPhasePattern;
+            lastProposalAccepted = true;
+        }
+        else
+        {
+            _smtCore.reportRejectedPhasePatternProposal();
+            lastProposalAccepted = false;
+        }
+    }
+
+    ENGINE_LOG( "Performing local search - done" );
+    return false;
+}
+
+void Engine::minimizeHeuristicCost( const LinearExpression &heuristicCost )
+{
+    ENGINE_LOG( "Optimizing w.r.t. the current heuristic cost..." );
+
+    if ( _lpSolverType == LPSolverType::GUROBI )
+    {
+        minimizeCostWithGurobi( heuristicCost );
+
+        ENGINE_LOG(
+            Stringf( "Current heuristic cost: %f", _gurobi->getOptimalCostOrObjective() ).ascii() );
+    }
+    else
+    {
+        _tableau->toggleOptimization( true );
+
+        _heuristicCost = heuristicCost;
+
+        bool localOptimumReached = false;
+        while ( !localOptimumReached )
+        {
+            DEBUG( _tableau->verifyInvariants() );
+
+            mainLoopStatistics();
+            if ( _verbosity > 1 &&
+                 _statistics.getLongAttribute( Statistics::NUM_MAIN_LOOP_ITERATIONS ) %
+                         _statisticsPrintingFrequency ==
+                     0 )
+                _statistics.print();
+
+            if ( !allVarsWithinBounds() )
+                throw VariableOutOfBoundDuringOptimizationException();
+
+            if ( performPrecisionRestorationIfNeeded() )
+                continue;
+
+            ASSERT( allVarsWithinBounds() );
+
+            localOptimumReached = performSimplexStep();
+        }
+        _tableau->toggleOptimization( false );
+        ENGINE_LOG( Stringf( "Current heuristic cost: %f", computeHeuristicCost( heuristicCost ) )
+                        .ascii() );
+    }
+
+    ENGINE_LOG( "Optimizing w.r.t. the current heuristic cost - done\n" );
+}
+
+double Engine::computeHeuristicCost( const LinearExpression &heuristicCost )
+{
+    return ( _costFunctionManager->computeGivenCostFunctionDirectly( heuristicCost._addends ) +
+             heuristicCost._constant );
+}
+
+void Engine::updatePseudoImpactWithSoICosts( double costOfLastAcceptedPhasePattern,
+                                             double costOfProposedPhasePattern )
+{
+    ASSERT( _soiManager );
+
+    const List<PiecewiseLinearConstraint *> &constraintsUpdated =
+        _soiManager->getConstraintsUpdatedInLastProposal();
+    // The score is divided by the number of constraints updated in the last
+    // proposal. In the Sum of Infeasibilities paper, only one constraint is
+    // updated each time, but we might consider alternative proposal
+    // strategies in the future.
+    double score = ( fabs( costOfLastAcceptedPhasePattern - costOfProposedPhasePattern ) /
+                     constraintsUpdated.size() );
+
+    ASSERT( constraintsUpdated.size() > 0 );
+    // Update the Pseudo-Impact estimation.
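+    // Worked example (hypothetical numbers): if the last accepted cost was 5.0,
+    // the proposed cost was 3.0 and two constraints were updated, each of them
+    // receives a score bump of |5.0 - 3.0| / 2 = 1.0, which is folded into the
+    // exponential-moving-average score tracker kept by the SMT core.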
+ for ( const auto &constraint : constraintsUpdated ) + _smtCore.updatePLConstraintScore( constraint, score ); +} + +void Engine::bumpUpPseudoImpactOfPLConstraintsNotInSoI() +{ + ASSERT( _soiManager ); + for ( const auto &plConstraint : _plConstraints ) + { + if ( plConstraint->isActive() && !plConstraint->supportSoI() && + !plConstraint->phaseFixed() && !plConstraint->satisfied() ) + _smtCore.updatePLConstraintScore( + plConstraint, GlobalConfiguration::SCORE_BUMP_FOR_PL_CONSTRAINTS_NOT_IN_SOI ); + } +} + +void Engine::informLPSolverOfBounds() +{ + if ( _lpSolverType == LPSolverType::GUROBI ) + { + struct timespec start = TimeUtils::sampleMicro(); + for ( unsigned i = 0; i < _preprocessedQuery->getNumberOfVariables(); ++i ) + { + String variableName = _milpEncoder->getVariableNameFromVariable( i ); + _gurobi->setLowerBound( variableName, _tableau->getLowerBound( i ) ); + _gurobi->setUpperBound( variableName, _tableau->getUpperBound( i ) ); + } + _gurobi->updateModel(); + struct timespec end = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TIME_ADDING_CONSTRAINTS_TO_MILP_SOLVER_MICRO, + TimeUtils::timePassed( start, end ) ); + } + else + { + // Bounds are already up-to-date in Tableau when using native Simplex. + return; + } +} + +bool Engine::minimizeCostWithGurobi( const LinearExpression &costFunction ) +{ + ASSERT( _gurobi && _milpEncoder ); + + struct timespec simplexStart = TimeUtils::sampleMicro(); + + _milpEncoder->encodeCostFunction( *_gurobi, costFunction ); + _gurobi->setTimeLimit( FloatUtils::infinity() ); + _gurobi->solve(); + + struct timespec simplexEnd = TimeUtils::sampleMicro(); + + _statistics.incLongAttribute( Statistics::TIME_SIMPLEX_STEPS_MICRO, + TimeUtils::timePassed( simplexStart, simplexEnd ) ); + _statistics.incLongAttribute( Statistics::NUM_SIMPLEX_STEPS, + _gurobi->getNumberOfSimplexIterations() ); + + if ( _gurobi->infeasible() ) + throw InfeasibleQueryException(); + else if ( _gurobi->optimal() ) + return true; + else + throw CommonError( CommonError::UNEXPECTED_GUROBI_STATUS, + Stringf( "Current status: %u", _gurobi->getStatusCode() ).ascii() ); + + return false; +} + +void Engine::checkGurobiBoundConsistency() const +{ + if ( _gurobi && _milpEncoder ) + { + for ( unsigned i = 0; i < _preprocessedQuery->getNumberOfVariables(); ++i ) + { + String iName = _milpEncoder->getVariableNameFromVariable( i ); + double gurobiLowerBound = _gurobi->getLowerBound( iName ); + double lowerBound = _tableau->getLowerBound( i ); + if ( !FloatUtils::areEqual( gurobiLowerBound, lowerBound ) ) + { + throw MarabouError( MarabouError::BOUNDS_NOT_UP_TO_DATE_IN_LP_SOLVER, + Stringf( "x%u lower bound inconsistent!" + " Gurobi: %f, Tableau: %f", + i, + gurobiLowerBound, + lowerBound ) + .ascii() ); + } + double gurobiUpperBound = _gurobi->getUpperBound( iName ); + double upperBound = _tableau->getUpperBound( i ); + + if ( !FloatUtils::areEqual( gurobiUpperBound, upperBound ) ) + { + throw MarabouError( MarabouError::BOUNDS_NOT_UP_TO_DATE_IN_LP_SOLVER, + Stringf( "x%u upper bound inconsistent!" 
+ " Gurobi: %f, Tableau: %f", + i, + gurobiUpperBound, + upperBound ) + .ascii() ); + } + } + } +} + +bool Engine::consistentBounds() const +{ + return _boundManager.consistentBounds(); +} + +Query Engine::buildQueryFromCurrentState() const +{ + Query query = *_preprocessedQuery; + for ( unsigned i = 0; i < query.getNumberOfVariables(); ++i ) + { + query.setLowerBound( i, _tableau->getLowerBound( i ) ); + query.setUpperBound( i, _tableau->getUpperBound( i ) ); + } + return query; +} + +void Engine::updateGroundUpperBound( const unsigned var, const double value ) +{ + ASSERT( var < _tableau->getN() && _produceUNSATProofs ); + if ( FloatUtils::lt( value, _groundBoundManager.getUpperBound( var ) ) ) + _groundBoundManager.setUpperBound( var, value ); +} + +void Engine::updateGroundLowerBound( const unsigned var, const double value ) +{ + ASSERT( var < _tableau->getN() && _produceUNSATProofs ); + if ( FloatUtils::gt( value, _groundBoundManager.getLowerBound( var ) ) ) + _groundBoundManager.setLowerBound( var, value ); +} + +double Engine::getGroundBound( unsigned var, bool isUpper ) const +{ + ASSERT( var < _tableau->getN() && _produceUNSATProofs ); + return isUpper ? _groundBoundManager.getUpperBound( var ) + : _groundBoundManager.getLowerBound( var ); +} + +bool Engine::shouldProduceProofs() const +{ + return _produceUNSATProofs; +} + +void Engine::explainSimplexFailure() +{ + ASSERT( _produceUNSATProofs ); + + DEBUG( checkGroundBounds() ); + + unsigned infeasibleVar = _boundManager.getInconsistentVariable(); + + if ( infeasibleVar == IBoundManager::NO_VARIABLE_FOUND || + !certifyInfeasibility( infeasibleVar ) ) + infeasibleVar = explainFailureWithTableau(); + + if ( infeasibleVar == IBoundManager::NO_VARIABLE_FOUND ) + infeasibleVar = explainFailureWithCostFunction(); + + if ( infeasibleVar == IBoundManager::NO_VARIABLE_FOUND ) + { + _costFunctionManager->computeCoreCostFunction(); + infeasibleVar = explainFailureWithCostFunction(); + } + + if ( infeasibleVar == IBoundManager::NO_VARIABLE_FOUND ) + { + markLeafToDelegate(); + return; + } + + ASSERT( infeasibleVar < _tableau->getN() ); + ASSERT( _UNSATCertificateCurrentPointer && + !( **_UNSATCertificateCurrentPointer ).getContradiction() ); + _statistics.incUnsignedAttribute( Statistics::NUM_CERTIFIED_LEAVES ); + + writeContradictionToCertificate( infeasibleVar ); + + ( **_UNSATCertificateCurrentPointer ).makeLeaf(); +} + +bool Engine::certifyInfeasibility( unsigned var ) const +{ + ASSERT( _produceUNSATProofs ); + + Vector contradiction = computeContradiction( var ); + + if ( contradiction.empty() ) + return FloatUtils::isNegative( _groundBoundManager.getUpperBound( var ) - + _groundBoundManager.getLowerBound( var ) ); + + SparseUnsortedList sparseContradiction = SparseUnsortedList(); + + contradiction.empty() + ? 
sparseContradiction.initializeToEmpty() + : sparseContradiction.initialize( contradiction.data(), contradiction.size() ); + + // In case contradiction is a vector of zeros + if ( sparseContradiction.empty() ) + return FloatUtils::isNegative( _groundBoundManager.getUpperBound( var ) - + _groundBoundManager.getLowerBound( var ) ); + + double derivedBound = + UNSATCertificateUtils::computeCombinationUpperBound( sparseContradiction, + _tableau->getSparseA(), + _groundBoundManager.getUpperBounds(), + _groundBoundManager.getLowerBounds(), + _tableau->getN() ); + return FloatUtils::isNegative( derivedBound ); +} + +double Engine::explainBound( unsigned var, bool isUpper ) const +{ + ASSERT( _produceUNSATProofs ); + + SparseUnsortedList explanation( 0 ); + + if ( !_boundManager.isExplanationTrivial( var, isUpper ) ) + explanation = _boundManager.getExplanation( var, isUpper ); + + if ( explanation.empty() ) + return isUpper ? _groundBoundManager.getUpperBound( var ) + : _groundBoundManager.getLowerBound( var ); + + return UNSATCertificateUtils::computeBound( var, + isUpper, + explanation, + _tableau->getSparseA(), + _groundBoundManager.getUpperBounds(), + _groundBoundManager.getLowerBounds(), + _tableau->getN() ); +} + +bool Engine::validateBounds( unsigned var, double epsilon, bool isUpper ) const +{ + ASSERT( _produceUNSATProofs ); + + double explained, real; + explained = explainBound( var, isUpper ); + if ( isUpper ) + { + real = _boundManager.getUpperBound( var ); + if ( explained - real > epsilon ) + { + ENGINE_LOG( "Var %d. Computed Upper %.5lf, real %.5lf. Difference is %.10lf\n", + var, + explained, + real, + abs( explained - real ) ); + return false; + } + } + else + { + real = _boundManager.getLowerBound( var ); + if ( explained - real < -epsilon ) + { + ENGINE_LOG( "Var %d. Computed Lower %.5lf, real %.5lf. 
Difference is %.10lf\n",
+                    var,
+                    explained,
+                    real,
+                    abs( explained - real ) );
+            return false;
+        }
+    }
+    return true;
+}
+
+bool Engine::validateAllBounds( double epsilon ) const
+{
+    ASSERT( _produceUNSATProofs );
+
+    bool res = true;
+
+    for ( unsigned var = 0; var < _tableau->getN(); ++var )
+        if ( !validateBounds( var, epsilon, Tightening::UB ) ||
+             !validateBounds( var, epsilon, Tightening::LB ) )
+            res = false;
+
+    return res;
+}
+
+bool Engine::checkGroundBounds() const
+{
+    ASSERT( _produceUNSATProofs );
+
+    for ( unsigned i = 0; i < _tableau->getN(); ++i )
+    {
+        if ( FloatUtils::gt( _groundBoundManager.getLowerBound( i ),
+                             _boundManager.getLowerBound( i ) ) ||
+             FloatUtils::lt( _groundBoundManager.getUpperBound( i ),
+                             _boundManager.getUpperBound( i ) ) )
+            return false;
+    }
+    return true;
+}
+
+unsigned Engine::explainFailureWithTableau()
+{
+    ASSERT( _produceUNSATProofs );
+
+    // Failure of a simplex step implies infeasible bounds imposed by the row
+    TableauRow boundUpdateRow = TableauRow( _tableau->getN() );
+
+    // For every out-of-bounds basic variable, check that it has no slack and
+    // that its explanations indeed prove a contradiction
+    unsigned basicVar;
+
+    for ( unsigned i = 0; i < _tableau->getM(); ++i )
+    {
+        if ( _tableau->basicOutOfBounds( i ) )
+        {
+            _tableau->getTableauRow( i, &boundUpdateRow );
+            basicVar = boundUpdateRow._lhs;
+
+            if ( FloatUtils::gt( _boundManager.computeRowBound( boundUpdateRow, Tightening::LB ),
+                                 _boundManager.getUpperBound( basicVar ) ) &&
+                 explainAndCheckContradiction( basicVar, Tightening::LB, &boundUpdateRow ) )
+                return basicVar;
+
+            if ( FloatUtils::lt( _boundManager.computeRowBound( boundUpdateRow, Tightening::UB ),
+                                 _boundManager.getLowerBound( basicVar ) ) &&
+                 explainAndCheckContradiction( basicVar, Tightening::UB, &boundUpdateRow ) )
+                return basicVar;
+        }
+    }
+
+    return IBoundManager::NO_VARIABLE_FOUND;
+}
+
+unsigned Engine::explainFailureWithCostFunction()
+{
+    ASSERT( _produceUNSATProofs );
+
+    // Failure of a simplex step might imply infeasible bounds imposed by the cost function
+    unsigned curBasicVar;
+    unsigned infVar = IBoundManager::NO_VARIABLE_FOUND;
+    double curCost;
+    bool curUpper;
+    const SparseUnsortedList *costRow = _costFunctionManager->createRowOfCostFunction();
+
+    for ( unsigned i = 0; i < _tableau->getM(); ++i )
+    {
+        curBasicVar = _tableau->basicIndexToVariable( i );
+        curCost = _costFunctionManager->getBasicCost( i );
+
+        if ( FloatUtils::isZero( curCost ) )
+            continue;
+
+        curUpper = ( curCost < 0 );
+
+        // Check that the basic variable has no slack
+        if ( !( !curUpper && FloatUtils::gt( _boundManager.computeSparseRowBound(
+                                                 *costRow, Tightening::LB, curBasicVar ),
+                                             _boundManager.getUpperBound( curBasicVar ) ) ) &&
+             !( curUpper && FloatUtils::lt( _boundManager.computeSparseRowBound(
+                                                *costRow, Tightening::UB, curBasicVar ),
+                                            _boundManager.getLowerBound( curBasicVar ) ) ) )
+            continue;
+
+        // Check that the explanation indeed proves a contradiction
+        if ( explainAndCheckContradiction( curBasicVar, curUpper, costRow ) )
+        {
+            infVar = curBasicVar;
+            break;
+        }
+    }
+
+    delete costRow;
+    return infVar;
+}
+
+bool Engine::explainAndCheckContradiction( unsigned var, bool isUpper, const TableauRow *row )
+{
+    ASSERT( _produceUNSATProofs );
+
+    SparseUnsortedList backup( 0 );
+    backup = _boundManager.getExplanation( var, isUpper );
+
+    _boundManager.updateBoundExplanation( *row, isUpper, var );
+
+    // Ensure the proof is correct
+    if ( certifyInfeasibility( var ) )
+        return true;
+
+    // Otherwise the proof is wrong: restore the previous explanation
+    _boundManager.setExplanation( backup, var, isUpper );
+
+    return false;
+}
+
+bool Engine::explainAndCheckContradiction( unsigned var,
+                                           bool isUpper,
+                                           const SparseUnsortedList *row )
+{
+    ASSERT( _produceUNSATProofs );
+
+    SparseUnsortedList backup( 0 );
+    backup = _boundManager.getExplanation( var, isUpper );
+
+    _boundManager.updateBoundExplanationSparse( *row, isUpper, var );
+
+    // Ensure the proof is correct
+    if ( certifyInfeasibility( var ) )
+        return true;
+
+    // Otherwise the proof is wrong: restore the previous explanation
+    _boundManager.setExplanation( backup, var, isUpper );
+
+    return false;
+}
+
+UnsatCertificateNode *Engine::getUNSATCertificateCurrentPointer() const
+{
+    return _UNSATCertificateCurrentPointer->get();
+}
+
+void Engine::setUNSATCertificateCurrentPointer( UnsatCertificateNode *node )
+{
+    _UNSATCertificateCurrentPointer->set( node );
+}
+
+const UnsatCertificateNode *Engine::getUNSATCertificateRoot() const
+{
+    return _UNSATCertificate;
+}
+
+bool Engine::certifyUNSATCertificate()
+{
+    ASSERT( _produceUNSATProofs && _UNSATCertificate && !_smtCore.getStackDepth() );
+
+    for ( auto &constraint : _plConstraints )
+    {
+        if ( !UNSATCertificateUtils::getSupportedActivations().exists( constraint->getType() ) )
+        {
+            String activationType = constraint->serializeToString().tokenize( "," ).back();
+            printf( "Certification Error! Network contains activation function %s, which is not "
+                    "yet supported by Marabou certification.\n",
+                    activationType.ascii() );
+            return false;
+        }
+    }
+
+    struct timespec certificationStart = TimeUtils::sampleMicro();
+    _precisionRestorer.restoreInitialEngineState( *this );
+
+    Vector<double> groundUpperBounds( _tableau->getN(), 0 );
+    Vector<double> groundLowerBounds( _tableau->getN(), 0 );
+
+    for ( unsigned i = 0; i < _tableau->getN(); ++i )
+    {
+        groundUpperBounds[i] = _groundBoundManager.getUpperBound( i );
+        groundLowerBounds[i] = _groundBoundManager.getLowerBound( i );
+    }
+
+    if ( GlobalConfiguration::WRITE_JSON_PROOF )
+    {
+        File file( JsonWriter::PROOF_FILENAME );
+        JsonWriter::writeProofToJson( _UNSATCertificate,
+                                      _tableau->getM(),
+                                      _tableau->getSparseA(),
+                                      groundUpperBounds,
+                                      groundLowerBounds,
+                                      _plConstraints,
+                                      file );
+    }
+
+    Checker unsatCertificateChecker( _UNSATCertificate,
+                                     _tableau->getM(),
+                                     _tableau->getSparseA(),
+                                     groundUpperBounds,
+                                     groundLowerBounds,
+                                     _plConstraints );
+    bool certificationSucceeded = unsatCertificateChecker.check();
+
+    _statistics.setLongAttribute(
+        Statistics::TOTAL_CERTIFICATION_TIME,
+        TimeUtils::timePassed( certificationStart, TimeUtils::sampleMicro() ) );
+    printf( "Certification time: " );
+    _statistics.printLongAttributeAsTime(
+        _statistics.getLongAttribute( Statistics::TOTAL_CERTIFICATION_TIME ) );
+
+    if ( certificationSucceeded )
+    {
+        printf( "Certified\n" );
+        _statistics.incUnsignedAttribute( Statistics::CERTIFIED_UNSAT );
+        if ( _statistics.getUnsignedAttribute( Statistics::NUM_DELEGATED_LEAVES ) )
+            printf( "Some leaves were delegated and need to be certified separately by an SMT "
+                    "solver\n" );
+    }
+    else
+        printf( "Error certifying UNSAT certificate\n" );
+
+    DEBUG( {
+        ASSERT( certificationSucceeded );
+        if ( _statistics.getUnsignedAttribute( Statistics::NUM_POPS ) )
+        {
+            double delegationRatio =
+                ( double )_statistics.getUnsignedAttribute( Statistics::NUM_DELEGATED_LEAVES ) /
+                _statistics.getUnsignedAttribute( Statistics::NUM_CERTIFIED_LEAVES );
+            ASSERT( FloatUtils::lt( delegationRatio, 0.01 ) );
+        }
+    } );
+
+    return certificationSucceeded;
+}
+
+void Engine::markLeafToDelegate()
+{
+    ASSERT( _produceUNSATProofs );
+
+    // Mark the leaf with the toDelegate flag
+    UnsatCertificateNode *currentUnsatCertificateNode = _UNSATCertificateCurrentPointer->get();
+    ASSERT( _UNSATCertificateCurrentPointer && !currentUnsatCertificateNode->getContradiction() );
+    currentUnsatCertificateNode->setDelegationStatus( DelegationStatus::DELEGATE_SAVE );
+    currentUnsatCertificateNode->deletePLCExplanations();
+    _statistics.incUnsignedAttribute( Statistics::NUM_DELEGATED_LEAVES );
+
+    if ( !currentUnsatCertificateNode->getChildren().empty() )
+        currentUnsatCertificateNode->makeLeaf();
+}
+
+const Vector<double> Engine::computeContradiction( unsigned infeasibleVar ) const
+{
+    ASSERT( _produceUNSATProofs );
+
+    unsigned m = _tableau->getM();
+    SparseUnsortedList upperBoundExplanation( 0 );
+    SparseUnsortedList lowerBoundExplanation( 0 );
+
+    if ( !_boundManager.isExplanationTrivial( infeasibleVar, Tightening::UB ) )
+        upperBoundExplanation = _boundManager.getExplanation( infeasibleVar, Tightening::UB );
+
+    if ( !_boundManager.isExplanationTrivial( infeasibleVar, Tightening::LB ) )
+        lowerBoundExplanation = _boundManager.getExplanation( infeasibleVar, Tightening::LB );
+
+    if ( upperBoundExplanation.empty() && lowerBoundExplanation.empty() )
+        return Vector<double>( 0 );
+
+    Vector<double> contradiction = Vector<double>( m, 0 );
+
+    if ( !upperBoundExplanation.empty() )
+        for ( const auto &entry : upperBoundExplanation )
+            contradiction[entry._index] = entry._value;
+
+    if ( !lowerBoundExplanation.empty() )
+        for ( const auto &entry : lowerBoundExplanation )
+            contradiction[entry._index] -= entry._value;
+
+    return contradiction;
+}
+
+void Engine::writeContradictionToCertificate( unsigned infeasibleVar ) const
+{
+    ASSERT( _produceUNSATProofs );
+
+    Vector<double> leafContradictionVec = computeContradiction( infeasibleVar );
+
+    Contradiction *leafContradiction = leafContradictionVec.empty()
+                                         ? new Contradiction( infeasibleVar )
+                                         : new Contradiction( leafContradictionVec );
+    ( **_UNSATCertificateCurrentPointer ).setContradiction( leafContradiction );
+}
+
+const BoundExplainer *Engine::getBoundExplainer() const
+{
+    return _boundManager.getBoundExplainer();
+}
+
+void Engine::setBoundExplainerContent( BoundExplainer *boundExplainer )
+{
+    _boundManager.copyBoundExplainerContent( boundExplainer );
+}
+
+void Engine::propagateBoundManagerTightenings()
+{
+    _boundManager.propagateTightenings();
+}
+
+void Engine::extractBounds( IQuery &inputQuery )
+{
+    for ( unsigned i = 0; i < inputQuery.getNumberOfVariables(); ++i )
+    {
+        if ( _preprocessingEnabled )
+        {
+            // Has the variable been merged into another?
+            unsigned variable = i;
+            while ( _preprocessor.variableIsMerged( variable ) )
+                variable = _preprocessor.getMergedIndex( variable );
+
+            // Symbolically fixed variables are ignored
+            if ( _preprocessor.variableIsUnusedAndSymbolicallyFixed( i ) )
+            {
+                inputQuery.tightenLowerBound( i, FloatUtils::negativeInfinity() );
+                inputQuery.tightenUpperBound( i, FloatUtils::infinity() );
+                continue;
+            }
+
+            // Fixed variables are easy: return the value they've been fixed to.
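+            // For example (hypothetical values): if x3 was fixed to 0.5 during
+            // preprocessing, both the lower and the upper bound reported for
+            // the original variable below become 0.5.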
+            if ( _preprocessor.variableIsFixed( variable ) )
+            {
+                inputQuery.tightenLowerBound( i, _preprocessor.getFixedValue( variable ) );
+                inputQuery.tightenUpperBound( i, _preprocessor.getFixedValue( variable ) );
+                continue;
+            }
+
+            // We know which variable to look for, but it may have been assigned
+            // a new index due to variable elimination
+            variable = _preprocessor.getNewIndex( variable );
+
+            inputQuery.tightenLowerBound( i, _preprocessedQuery->getLowerBound( variable ) );
+            inputQuery.tightenUpperBound( i, _preprocessedQuery->getUpperBound( variable ) );
+        }
+        else
+        {
+            inputQuery.tightenLowerBound( i, _preprocessedQuery->getLowerBound( i ) );
+            inputQuery.tightenUpperBound( i, _preprocessedQuery->getUpperBound( i ) );
+        }
+    }
+}
+
+void Engine::addPLCLemma( std::shared_ptr<PLCLemma> &explanation )
+{
+    if ( !_produceUNSATProofs )
+        return;
+
+    ASSERT( explanation && _UNSATCertificate && _UNSATCertificateCurrentPointer );
+    _statistics.incUnsignedAttribute( Statistics::NUM_LEMMAS );
+    _UNSATCertificateCurrentPointer->get()->addPLCLemma( explanation );
+}
\ No newline at end of file
diff --git a/src/nlr/GlobalConfiguration.cpp b/src/nlr/GlobalConfiguration.cpp
new file mode 100644
index 0000000000..3a08b92234
--- /dev/null
+++ b/src/nlr/GlobalConfiguration.cpp
@@ -0,0 +1,238 @@
+/*********************                                                        */
+/*! \file GlobalConfiguration.cpp
+ ** \verbatim
+ ** Top contributors (to current version):
+ **   Guy Katz, Parth Shah, Derek Huang
+ ** This file is part of the Marabou project.
+ ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS
+ ** in the top-level source directory) and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** [[ Add lengthier description here ]]
+
+ **/
+
+#include "GlobalConfiguration.h"
+
+#include "DivideStrategy.h"
+#include "MString.h"
+
+#include <cstdio>
+
+
+// The exponential moving average is calculated as
+//     ema = current * alpha + previous * (1 - alpha)
+const double GlobalConfiguration::EXPONENTIAL_MOVING_AVERAGE_ALPHA = 0.5;
+
+// Whether to use SoI instead of Reluplex for the local search for satisfying
+// assignments to non-linear constraints.
+bool GlobalConfiguration::USE_DEEPSOI_LOCAL_SEARCH = true;
+
+const double GlobalConfiguration::SCORE_BUMP_FOR_PL_CONSTRAINTS_NOT_IN_SOI = 5;
+
+// Use the polarity metrics to decide which branch to take first in a case split
+// and how to repair a ReLU constraint.
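+// The polarity of a ReLU whose input is bounded by [l, u] with l < 0 < u is
+// ( l + u ) / ( u - l ); it lies in [-1, 1], and values close to 0 mark the
+// most uncertain constraints, which are attractive branching candidates.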
+const bool GlobalConfiguration::USE_POLARITY_BASED_DIRECTION_HEURISTICS = true; + +const double GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS = 0.0000000001; +const unsigned GlobalConfiguration::DEFAULT_DOUBLE_TO_STRING_PRECISION = 10; +const unsigned GlobalConfiguration::STATISTICS_PRINTING_FREQUENCY = 10000; +const unsigned GlobalConfiguration::STATISTICS_PRINTING_FREQUENCY_GUROBI = 100; +const double GlobalConfiguration::BOUND_COMPARISON_ADDITIVE_TOLERANCE = 0.0000001; +const double GlobalConfiguration::BOUND_COMPARISON_MULTIPLICATIVE_TOLERANCE = 0.001 * 0.0000001; +const double GlobalConfiguration::PIVOT_CHANGE_COLUMN_TOLERANCE = 0.000000001; +const double GlobalConfiguration::PIVOT_ROW_AND_COLUMN_TOLERANCE = 0.01; +const double GlobalConfiguration::ENTRY_ELIGIBILITY_TOLERANCE = 0.00000001; +const double GlobalConfiguration::RATIO_CONSTRAINT_ADDITIVE_TOLERANCE = 0.0000001 * 0.3; +const double GlobalConfiguration::RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE = + 0.001 * 0.0000001 * 0.3; +const double GlobalConfiguration::HARRIS_RATIO_CONSTRAINT_ADDITIVE_TOLERANCE = 0.0000001 * 0.5; +const double GlobalConfiguration::HARRIS_RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE = + 0.001 * 0.0000001 * 0.5; +const double GlobalConfiguration::BASIC_COSTS_ADDITIVE_TOLERANCE = 0.0000001; +const double GlobalConfiguration::BASIC_COSTS_MULTIPLICATIVE_TOLERANCE = 0.001 * 0.0000001; +const double GlobalConfiguration::SPARSE_FORREST_TOMLIN_DIAGONAL_ELEMENT_TOLERANCE = 0.00001; +const unsigned GlobalConfiguration::DEGRADATION_CHECKING_FREQUENCY = 100; +const double GlobalConfiguration::DEGRADATION_THRESHOLD = 0.1; +const double GlobalConfiguration::ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD = 0.0001; +const bool GlobalConfiguration::USE_COLUMN_MERGING_EQUATIONS = false; +const double GlobalConfiguration::GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD = 0.1; +const unsigned GlobalConfiguration::MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS = 5; +const DivideStrategy GlobalConfiguration::SPLITTING_HEURISTICS = DivideStrategy::ReLUViolation; +const unsigned GlobalConfiguration::INTERVAL_SPLITTING_FREQUENCY = 10; +const unsigned GlobalConfiguration::INTERVAL_SPLITTING_THRESHOLD = 10; +const unsigned GlobalConfiguration::BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY = 100; +const unsigned GlobalConfiguration::ROW_BOUND_TIGHTENER_SATURATION_ITERATIONS = 20; +const double GlobalConfiguration::COST_FUNCTION_ERROR_THRESHOLD = 0.0000000001; + +const unsigned GlobalConfiguration::SIMULATION_RANDOM_SEED = 1; +const unsigned GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED = 1; +const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED = 1; +const unsigned GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS = 1000; +const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS = 100; +const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE = 0.1; +const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE = 0.05; + +const bool GlobalConfiguration::USE_HARRIS_RATIO_TEST = true; + +const double GlobalConfiguration::SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT = 0.00000000001; +const double GlobalConfiguration::LP_TIGHTENING_ROUNDING_CONSTANT = 0.00000001; + +const double GlobalConfiguration::SIGMOID_CUTOFF_CONSTANT = 20; + +const bool GlobalConfiguration::PREPROCESS_INPUT_QUERY = true; +const bool GlobalConfiguration::PREPROCESSOR_ELIMINATE_VARIABLES = true; +const bool GlobalConfiguration::PL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING = true; 
+const bool GlobalConfiguration::NL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING = true;
+const double GlobalConfiguration::PREPROCESSOR_ALMOST_FIXED_THRESHOLD = 0.00001;
+
+const unsigned GlobalConfiguration::PREPROCESSSING_MAX_TIGHTEING_ROUND = 10;
+
+const bool GlobalConfiguration::WARM_START = false;
+
+const unsigned GlobalConfiguration::MAX_ITERATIONS_WITHOUT_PROGRESS = 10000;
+
+const unsigned GlobalConfiguration::PSE_ITERATIONS_BEFORE_RESET = 1000;
+const double GlobalConfiguration::PSE_GAMMA_ERROR_THRESHOLD = 0.001;
+const double GlobalConfiguration::PSE_GAMMA_UPDATE_TOLERANCE = 0.000000001;
+
+const double GlobalConfiguration::CONSTRAINT_COMPARISON_TOLERANCE = 0.00001;
+
+const double GlobalConfiguration::SOFTMAX_LSE2_THRESHOLD = 0.6;
+
+const bool GlobalConfiguration::ONLY_AUX_INITIAL_BASIS = false;
+
+const GlobalConfiguration::ExplicitBasisBoundTighteningType
+    GlobalConfiguration::EXPLICIT_BASIS_BOUND_TIGHTENING_TYPE =
+        GlobalConfiguration::COMPUTE_INVERTED_BASIS_MATRIX;
+const bool GlobalConfiguration::EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION = false;
+const double GlobalConfiguration::EXPLICIT_BASIS_BOUND_TIGHTENING_ROUNDING_CONSTANT = 1e-6;
+
+const unsigned GlobalConfiguration::REFACTORIZATION_THRESHOLD = 100;
+const GlobalConfiguration::BasisFactorizationType GlobalConfiguration::BASIS_FACTORIZATION_TYPE =
+    GlobalConfiguration::SPARSE_FORREST_TOMLIN_FACTORIZATION;
+
+const unsigned GlobalConfiguration::POLARITY_CANDIDATES_THRESHOLD = 5;
+
+const unsigned GlobalConfiguration::DNC_DEPTH_THRESHOLD = 5;
+
+const double GlobalConfiguration::MINIMAL_COEFFICIENT_FOR_TIGHTENING = 0.01;
+const double GlobalConfiguration::LEMMA_CERTIFICATION_TOLERANCE = 0.000001;
+const bool GlobalConfiguration::WRITE_JSON_PROOF = false;
+
+const unsigned GlobalConfiguration::BACKWARD_BOUND_PROPAGATION_DEPTH = 3;
+const unsigned GlobalConfiguration::MAX_ROUNDS_OF_BACKWARD_ANALYSIS = 10;
+
+#ifdef ENABLE_GUROBI
+const unsigned GlobalConfiguration::GUROBI_NUMBER_OF_THREADS = 1;
+const bool GlobalConfiguration::GUROBI_LOGGING = false;
+#endif // ENABLE_GUROBI
+
+// Logging - note that it is enabled only in Debug mode
+const bool GlobalConfiguration::DNC_MANAGER_LOGGING = false;
+const bool GlobalConfiguration::ENGINE_LOGGING = false;
+const bool GlobalConfiguration::TABLEAU_LOGGING = false;
+const bool GlobalConfiguration::SMT_CORE_LOGGING = false;
+const bool GlobalConfiguration::DANTZIGS_RULE_LOGGING = false;
+const bool GlobalConfiguration::BASIS_FACTORIZATION_LOGGING = false;
+const bool GlobalConfiguration::PREPROCESSOR_LOGGING = false;
+const bool GlobalConfiguration::INPUT_QUERY_LOGGING = false;
+const bool GlobalConfiguration::PROJECTED_STEEPEST_EDGE_LOGGING = false;
+const bool GlobalConfiguration::GAUSSIAN_ELIMINATION_LOGGING = false;
+const bool GlobalConfiguration::QUERY_LOADER_LOGGING = false;
+const bool GlobalConfiguration::SYMBOLIC_BOUND_TIGHTENER_LOGGING = false;
+const bool GlobalConfiguration::NETWORK_LEVEL_REASONER_LOGGING = false;
+const bool GlobalConfiguration::MPS_PARSER_LOGGING = false;
+const bool GlobalConfiguration::ONNX_PARSER_LOGGING = false;
+const bool GlobalConfiguration::SOI_LOGGING = false;
+const bool GlobalConfiguration::SCORE_TRACKER_LOGGING = false;
+const bool GlobalConfiguration::CEGAR_LOGGING = false;
+
+const bool GlobalConfiguration::USE_SMART_FIX = false;
+const bool GlobalConfiguration::USE_LEAST_FIX = false;
+
+void GlobalConfiguration::print()
+{
+    printf( "****************************\n" );
+    printf( "*** Global Configuration ***\n" );
printf( "****************************\n" ); + printf( " DEFAULT_EPSILON_FOR_COMPARISONS: %.15lf\n", DEFAULT_EPSILON_FOR_COMPARISONS ); + printf( " DEFAULT_DOUBLE_TO_STRING_PRECISION: %u\n", DEFAULT_DOUBLE_TO_STRING_PRECISION ); + printf( " STATISTICS_PRINTING_FREQUENCY: %u\n", STATISTICS_PRINTING_FREQUENCY ); + printf( " BOUND_COMPARISON_ADDITIVE_TOLERANCE: %.15lf\n", + BOUND_COMPARISON_ADDITIVE_TOLERANCE ); + printf( " BOUND_COMPARISON_MULTIPLICATIVE_TOLERANCE: %.15lf\n", + BOUND_COMPARISON_MULTIPLICATIVE_TOLERANCE ); + printf( " PIVOT_CHANGE_COLUMN_TOLERANCE: %.15lf\n", PIVOT_CHANGE_COLUMN_TOLERANCE ); + printf( " RATIO_CONSTRAINT_ADDITIVE_TOLERANCE: %.15lf\n", + RATIO_CONSTRAINT_ADDITIVE_TOLERANCE ); + printf( " RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE: %.15lf\n", + RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE ); + printf( " BASIC_COSTS_ADDITIVE_TOLERANCE: %.15lf\n", BASIC_COSTS_ADDITIVE_TOLERANCE ); + printf( " BASIC_COSTS_MULTIPLICATIVE_TOLERANCE: %.15lf\n", + BASIC_COSTS_MULTIPLICATIVE_TOLERANCE ); + printf( " DEGRADATION_CHECKING_FREQUENCY: %u\n", DEGRADATION_CHECKING_FREQUENCY ); + printf( " DEGRADATION_THRESHOLD: %.15lf\n", DEGRADATION_THRESHOLD ); + printf( " ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD: %.15lf\n", ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD ); + printf( " USE_COLUMN_MERGING_EQUATIONS: %s\n", USE_COLUMN_MERGING_EQUATIONS ? "Yes" : "No" ); + printf( " GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD: %.15lf\n", + GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD ); + printf( " MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS: %u\n", MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS ); + printf( " BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY: %u\n", + BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY ); + printf( " COST_FUNCTION_ERROR_THRESHOLD: %.15lf\n", COST_FUNCTION_ERROR_THRESHOLD ); + printf( " USE_HARRIS_RATIO_TEST: %s\n", USE_HARRIS_RATIO_TEST ? "Yes" : "No" ); + + printf( " PREPROCESS_INPUT_QUERY: %s\n", PREPROCESS_INPUT_QUERY ? "Yes" : "No" ); + printf( " PREPROCESSOR_ELIMINATE_VARIABLES: %s\n", + PREPROCESSOR_ELIMINATE_VARIABLES ? "Yes" : "No" ); + printf( " PSE_ITERATIONS_BEFORE_RESET: %u\n", PSE_ITERATIONS_BEFORE_RESET ); + printf( " PSE_GAMMA_ERROR_THRESHOLD: %.15lf\n", PSE_GAMMA_ERROR_THRESHOLD ); + printf( " CONSTRAINT_COMPARISON_TOLERANCE: %.15lf\n", CONSTRAINT_COMPARISON_TOLERANCE ); + + String basisBoundTighteningType; + switch ( EXPLICIT_BASIS_BOUND_TIGHTENING_TYPE ) + { + case COMPUTE_INVERTED_BASIS_MATRIX: + basisBoundTighteningType = "Compute inverted basis matrix"; + break; + + case USE_IMPLICIT_INVERTED_BASIS_MATRIX: + basisBoundTighteningType = "Use implicit inverted basis matrix"; + break; + + default: + basisBoundTighteningType = "Unknown"; + break; + } + + printf( " EXPLICIT_BASIS_BOUND_TIGHTENING_INVERT_BASIS: %s\n", + basisBoundTighteningType.ascii() ); + printf( " EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION: %s\n", + EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION ? 
"Yes" : "No" ); + printf( " REFACTORIZATION_THRESHOLD: %u\n", REFACTORIZATION_THRESHOLD ); + + String basisFactorizationType; + if ( GlobalConfiguration::BASIS_FACTORIZATION_TYPE == GlobalConfiguration::LU_FACTORIZATION ) + basisFactorizationType = "LU_FACTORIZATION"; + else if ( GlobalConfiguration::BASIS_FACTORIZATION_TYPE == + GlobalConfiguration::SPARSE_LU_FACTORIZATION ) + basisFactorizationType = "SPARSE_LU_FACTORIZATION"; + else if ( GlobalConfiguration::BASIS_FACTORIZATION_TYPE == + GlobalConfiguration::FORREST_TOMLIN_FACTORIZATION ) + basisFactorizationType = "FORREST_TOMLIN_FACTORIZATION"; + else + basisFactorizationType = "Unknown"; + + printf( " BASIS_FACTORIZATION_TYPE: %s\n", basisFactorizationType.ascii() ); + printf( "****************************\n" ); +} + +// +// Local Variables: +// compile-command: "make -C ../.. " +// tags-file-name: "../../TAGS" +// c-basic-offset: 4 +// End: +// diff --git a/src/nlr/GlobalConfiguration.h b/src/nlr/GlobalConfiguration.h new file mode 100644 index 0000000000..78ab9f662e --- /dev/null +++ b/src/nlr/GlobalConfiguration.h @@ -0,0 +1,315 @@ +/********************* */ +/*! \file GlobalConfiguration.h + ** \verbatim + ** Top contributors (to current version): + ** Guy Katz, Parth Shah, Derek Huang + ** This file is part of the Marabou project. + ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS + ** in the top-level source directory) and their institutional affiliations. + ** All rights reserved. See the file COPYING in the top-level source + ** directory for licensing information.\endverbatim + ** + ** [[ Add lengthier description here ]] + + **/ + +#ifndef __GlobalConfiguration_h__ +#define __GlobalConfiguration_h__ + +#include "DivideStrategy.h" + +class GlobalConfiguration +{ +public: + static void print(); + + // The exponential moving average is calculated as + // ema = current * alpha + previous * (1 - alpha) + static const double EXPONENTIAL_MOVING_AVERAGE_ALPHA; + + // Whether to use SoI instead of Reluplex for local search for satisfying assignments + // to non-linear constraint. + static bool USE_DEEPSOI_LOCAL_SEARCH; + + // The quantity by which the score is bumped up for PLContraints not + // participating in the SoI. This promotes those constraints in the branching + // order. + static const double SCORE_BUMP_FOR_PL_CONSTRAINTS_NOT_IN_SOI; + + // Use the polarity metrics to decide which branch to take first in a case split + // and how to repair a ReLU constraint. + static const bool USE_POLARITY_BASED_DIRECTION_HEURISTICS; + + // The default epsilon used for comparing doubles + static const double DEFAULT_EPSILON_FOR_COMPARISONS; + + // The precision level when convering doubles to strings + static const unsigned DEFAULT_DOUBLE_TO_STRING_PRECISION; + + // How often should the main loop print statistics? + static const unsigned STATISTICS_PRINTING_FREQUENCY; + static const unsigned STATISTICS_PRINTING_FREQUENCY_GUROBI; + + // Tolerance when checking whether the value computed for a basic variable is out of bounds + static const double BOUND_COMPARISON_ADDITIVE_TOLERANCE; + static const double BOUND_COMPARISON_MULTIPLICATIVE_TOLERANCE; + + // Tolerance when checking whether a basic variable depends on a non-basic variable, by looking + // at the change column, as part of a pivot operation. 
+ static const double PIVOT_CHANGE_COLUMN_TOLERANCE; + + // Tolerance for the difference when computing the pivot entry by column and by row + static const double PIVOT_ROW_AND_COLUMN_TOLERANCE; + + // Tolerance when checking whether a non-basic variable is eligible for being selected as the + // entering variable, by its reduced cost + static const double ENTRY_ELIGIBILITY_TOLERANCE; + + // Ratio test tolerance constants + static const double RATIO_CONSTRAINT_ADDITIVE_TOLERANCE; + static const double RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE; + static const double HARRIS_RATIO_CONSTRAINT_ADDITIVE_TOLERANCE; + static const double HARRIS_RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE; + + // Cost function tolerance constants + static const double BASIC_COSTS_ADDITIVE_TOLERANCE; + static const double BASIC_COSTS_MULTIPLICATIVE_TOLERANCE; + + // Sparse ForrestTomlin diagonal element tolerance constant + static const double SPARSE_FORREST_TOMLIN_DIAGONAL_ELEMENT_TOLERANCE; + + // Toggle use of Harris' two-pass ratio test for selecting the leaving variable + static const bool USE_HARRIS_RATIO_TEST; + + // Toggle query-preprocessing on/off. + static const bool PREPROCESS_INPUT_QUERY; + + // Assuming the preprocessor is on, toggle whether or not it will attempt to perform variable + // elimination. + static const bool PREPROCESSOR_ELIMINATE_VARIABLES; + + // Toggle whether or not PL/NL constraints will be called upon + // to add auxiliary variables and equations after preprocessing. + static const bool PL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING; + static const bool NL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING; + + // If the difference between a variable's lower and upper bounds is smaller than this + // threshold, the preprocessor will treat it as fixed. + static const double PREPROCESSOR_ALMOST_FIXED_THRESHOLD; + + // Maximal rounds of tightening to perform in the preprocessor to avoid non-termination. + static const unsigned PREPROCESSSING_MAX_TIGHTEING_ROUND; + + // Try to set the initial tableau assignment to an assignment that is legal with + // respect to the input network. + static const bool WARM_START; + + // The maximal number of iterations without new tree states being visited, before + // the engine performs a precision restoration. + static const unsigned MAX_ITERATIONS_WITHOUT_PROGRESS; + + // How often should the main loop check the current degradation? + static const unsigned DEGRADATION_CHECKING_FREQUENCY; + + // The threshold of degradation above which restoration is required + static const double DEGRADATION_THRESHOLD; + + // If a pivot element in a simplex iteration is smaller than this threshold, the engine will + // attempt to pick another element. + static const double ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD; + + // If true, column-merging equations are given special treatment and cause columns in the + // tableau to be merged (instead of a new row added). + static const bool USE_COLUMN_MERGING_EQUATIONS; + + // If a pivot element in a Gaussian elimination iteration is smaller than this threshold times + // the largest element in the column, the elimination engine will attempt to pick another pivot. + static const double GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD; + + // How many potential pivots should the engine inspect (at most) in every simplex iteration? 
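+    // (E.g., with the default value of 5, at most five candidate pivots are
+    // examined in a single simplex iteration before one is selected.)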
+    static const unsigned MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS;
+
+    static const DivideStrategy SPLITTING_HEURISTICS;
+
+    // The frequency to use interval splitting when the largest-interval splitting strategy is in
+    // use.
+    static const unsigned INTERVAL_SPLITTING_FREQUENCY;
+
+    // When automatically deciding which splitting strategy to use, we use relu-splitting if
+    // the number of inputs is larger than this number.
+    static const unsigned INTERVAL_SPLITTING_THRESHOLD;
+
+    // How often should we perform full bound tightening, on the entire constraint matrix A.
+    static const unsigned BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY;
+
+    // When the row bound tightener is asked to run until saturation, it can enter an infinite loop
+    // due to tiny increments in bounds. This number limits the number of iterations it can perform.
+    static const unsigned ROW_BOUND_TIGHTENER_SATURATION_ITERATIONS;
+
+    // If the cost function error exceeds this threshold, it is recomputed
+    static const double COST_FUNCTION_ERROR_THRESHOLD;
+
+    // Random seed for generating simulation values.
+    static const unsigned SIMULATION_RANDOM_SEED;
+
+    // Random seed for EstimateVolume procedure (PreimageApproximation).
+    static const unsigned VOLUME_ESTIMATION_RANDOM_SEED;
+
+    // Number of iterations for EstimateVolume procedure (PreimageApproximation).
+    static const unsigned VOLUME_ESTIMATION_ITERATIONS;
+
+    // Random seed for PreimageApproximation optimization.
+    static const unsigned PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED;
+
+    // Maximum iterations for PreimageApproximation optimization.
+    static const unsigned PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS;
+
+    // Step size for PreimageApproximation optimization.
+    static const double PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE;
+
+    // Learning rate for PreimageApproximation optimization.
+    static const double PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE;
+
+    // How often should projected steepest edge reset the reference space?
+    static const unsigned PSE_ITERATIONS_BEFORE_RESET;
+
+    // An error threshold which, when crossed, causes projected steepest edge to reset the reference
+    // space
+    static const double PSE_GAMMA_ERROR_THRESHOLD;
+
+    // PSE's Gamma function's update tolerance
+    static const double PSE_GAMMA_UPDATE_TOLERANCE;
+
+    // The tolerance for checking whether f = Constraint( b ), Constraint \in { ReLU, ABS, Sign }
+    static const double CONSTRAINT_COMPARISON_TOLERANCE;
+
+    // Toggle between two types of LSE lower bound for softmax
+    static const double SOFTMAX_LSE2_THRESHOLD;
+
+    // Should the initial basis be comprised only of auxiliary (row) variables?
+    static const bool ONLY_AUX_INITIAL_BASIS;
+
+    /*
+      Explicit (Reluplex-style) bound tightening options
+    */
+
+    enum ExplicitBasisBoundTighteningType {
+        // Compute the inverse basis matrix and use it
+        COMPUTE_INVERTED_BASIS_MATRIX = 0,
+        // Use the inverted basis matrix without explicitly computing it, via transformations
+        USE_IMPLICIT_INVERTED_BASIS_MATRIX = 1,
+        // Disable explicit basis bound tightening
+        DISABLE_EXPLICIT_BASIS_TIGHTENING = 2,
+    };
+
+    // When doing bound tightening using the explicit basis matrix, should the basis matrix be
+    // inverted?
+    static const ExplicitBasisBoundTighteningType EXPLICIT_BASIS_BOUND_TIGHTENING_TYPE;
+    static const double EXPLICIT_BASIS_BOUND_TIGHTENING_ROUNDING_CONSTANT;
+
+    // When doing explicit bound tightening, should we repeat until saturation?
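+    // (Saturation means repeating passes until no further tightening is
+    // derived; by default a single pass is performed.)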
+    static const bool EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION;
+
+    /*
+      Symbolic bound tightening options
+    */
+
+    // Symbolic tightening, LP rounding constants
+    static const double SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT;
+    static const double LP_TIGHTENING_ROUNDING_CONSTANT;
+
+    static const double SIGMOID_CUTOFF_CONSTANT;
+
+    /*
+      Constraint fixing heuristics
+    */
+
+    // When a PL constraint proposes a fix that affects multiple variables, should it first query
+    // for any relevant linear connections between the variables?
+    static const bool USE_SMART_FIX;
+
+    // A heuristic for selecting which of the broken PL constraints will be repaired next. In this
+    // case, the one that has been repaired the least number of times so far.
+    static const bool USE_LEAST_FIX;
+
+    /*
+      Basis factorization options
+    */
+
+    // The number of accumulated eta matrices, after which the basis will be refactorized
+    static const unsigned REFACTORIZATION_THRESHOLD;
+
+    // The kind of basis factorization algorithm in use
+    enum BasisFactorizationType {
+        LU_FACTORIZATION,
+        SPARSE_LU_FACTORIZATION,
+        FORREST_TOMLIN_FACTORIZATION,
+        SPARSE_FORREST_TOMLIN_FACTORIZATION,
+    };
+    static const BasisFactorizationType BASIS_FACTORIZATION_TYPE;
+
+    /* In the polarity-based branching heuristics, only this many earliest nodes
+       are considered to branch on.
+    */
+    static const unsigned POLARITY_CANDIDATES_THRESHOLD;
+
+    /* The max number of DnC splits
+    */
+    static const unsigned DNC_DEPTH_THRESHOLD;
+
+    /* Minimal coefficient of a variable in a Tableau row that is used for bound tightening
+    */
+    static const double MINIMAL_COEFFICIENT_FOR_TIGHTENING;
+
+    /* The tolerance of errors when checking lemmas in the proof-checking process
+    */
+    static const double LEMMA_CERTIFICATION_TOLERANCE;
+
+    /* Denote whether proofs should be written as a JSON file
+    */
+    static const bool WRITE_JSON_PROOF;
+
+    /* How many layers after the current layer do we encode in backward analysis.
+    */
+    static const unsigned BACKWARD_BOUND_PROPAGATION_DEPTH;
+
+    /* How many rounds of backward analysis to perform?
+    */
+    static const unsigned MAX_ROUNDS_OF_BACKWARD_ANALYSIS;
+
+#ifdef ENABLE_GUROBI
+    /*
+      The number of threads Gurobi spawns
+    */
+    static const unsigned GUROBI_NUMBER_OF_THREADS;
+    static const bool GUROBI_LOGGING;
+#endif // ENABLE_GUROBI
+
+    /*
+      Logging options
+    */
+    static const bool DNC_MANAGER_LOGGING;
+    static const bool ENGINE_LOGGING;
+    static const bool TABLEAU_LOGGING;
+    static const bool SMT_CORE_LOGGING;
+    static const bool DANTZIGS_RULE_LOGGING;
+    static const bool BASIS_FACTORIZATION_LOGGING;
+    static const bool PREPROCESSOR_LOGGING;
+    static const bool INPUT_QUERY_LOGGING;
+    static const bool PROJECTED_STEEPEST_EDGE_LOGGING;
+    static const bool GAUSSIAN_ELIMINATION_LOGGING;
+    static const bool QUERY_LOADER_LOGGING;
+    static const bool SYMBOLIC_BOUND_TIGHTENER_LOGGING;
+    static const bool NETWORK_LEVEL_REASONER_LOGGING;
+    static const bool MPS_PARSER_LOGGING;
+    static const bool ONNX_PARSER_LOGGING;
+    static const bool SOI_LOGGING;
+    static const bool SCORE_TRACKER_LOGGING;
+    static const bool CEGAR_LOGGING;
+};
+
+#endif // __GlobalConfiguration_h__
+
+//
+// Local Variables:
+// compile-command: "make -C .. "
" +// tags-file-name: "../TAGS" +// c-basic-offset: 4 +// End: +// diff --git a/src/nlr/GradientDescent.h b/src/nlr/GradientDescent.h new file mode 100644 index 0000000000..e86aca17d9 --- /dev/null +++ b/src/nlr/GradientDescent.h @@ -0,0 +1,315 @@ +/* + * Copyright Nick Thompson, 2024 + * Use, modification and distribution are subject to the + * Boost Software License, Version 1.0. (See accompanying file + * LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) + */ +#ifndef GRADIENT_DESCENT +#define GRADIENT_DESCENT + +#include "FloatUtils.h" +#include "GlobalConfiguration.h" + +#include // for std::sort +#include +#include +#include +#include +#include +#include +#include // for std::false_type +#include +#include + +namespace NLR { + +template struct has_resize : std::false_type +{ +}; + +template +struct has_resize().resize( size_t{} ) )>> : std::true_type +{ +}; + +template constexpr bool has_resize_v = has_resize::value; + +template +void validate_bounds( ArgumentContainer const &lower_bounds, ArgumentContainer const &upper_bounds ) +{ + std::ostringstream oss; + + if ( lower_bounds.size() == 0 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The dimension of the problem cannot be zero."; + throw std::domain_error( oss.str() ); + } + + if ( upper_bounds.size() != lower_bounds.size() ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": There must be the same number of lower bounds as upper bounds, but given "; + oss << upper_bounds.size() << " upper bounds, and " << lower_bounds.size() + << " lower bounds."; + throw std::domain_error( oss.str() ); + } + + for ( size_t i = 0; i < lower_bounds.size(); ++i ) + { + auto lb = lower_bounds[i]; + auto ub = upper_bounds[i]; + if ( lb > ub ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The upper bound must be greater than or equal to the lower bound, but the " + "upper bound is " + << ub << " and the lower is " << lb << "."; + throw std::domain_error( oss.str() ); + } + + if ( !FloatUtils::isFinite( lb ) ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The lower bound must be finite, but got " << lb << "."; + oss << " For infinite bounds, emulate with std::numeric_limits::lower() or use a " + "standard infinite->finite " + "transform."; + throw std::domain_error( oss.str() ); + } + + if ( !FloatUtils::isFinite( ub ) ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The upper bound must be finite, but got " << ub << "."; + oss << " For infinite bounds, emulate with std::numeric_limits::max() or use a " + "standard infinite->finite " + "transform."; + throw std::domain_error( oss.str() ); + } + } +} + +template +std::vector random_initial_population( ArgumentContainer const &lower_bounds, + ArgumentContainer const &upper_bounds, + size_t initial_population_size, + URBG &&gen ) +{ + using Real = typename ArgumentContainer::value_type; + using DimensionlessReal = decltype( Real() / Real() ); + constexpr bool has_resize = has_resize_v; + std::vector population( initial_population_size ); + auto const dimension = lower_bounds.size(); + + for ( size_t i = 0; i < population.size(); ++i ) + { + if constexpr ( has_resize ) + population[i].resize( dimension ); + else + { + // Argument type must be known at compile-time; like std::array: + if ( population[i].size() != dimension ) + { + std::ostringstream oss; + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": For containers which do not have resize, 
+                oss << ": For containers which do not have resize, the default size must be the "
+                       "same as the dimension, ";
+                oss << "but the default container size is " << population[i].size()
+                    << " and the dimension of the problem is " << dimension << ".";
+                oss << " The function argument container type is "
+                    << typeid( ArgumentContainer ).name() << ".\n";
+                throw std::runtime_error( oss.str() );
+            }
+        }
+    }
+
+    // Why don't we provide an option to initialize with (say) a Gaussian distribution?
+    // > If the optimum's location is fairly well known,
+    // > a Gaussian distribution may prove somewhat faster, although it
+    // > may also increase the probability that the population will converge prematurely.
+    // > In general, uniform distributions are preferred, since they best reflect
+    // > the lack of knowledge about the optimum's location.
+    //  - Differential Evolution: A Practical Approach to Global Optimization
+    // That said, scipy uses Latin Hypercube sampling and says self-avoiding sequences are
+    // preferable. So this is something that could be investigated and potentially improved.
+    std::uniform_real_distribution<DimensionlessReal> dis( DimensionlessReal( 0 ),
+                                                           DimensionlessReal( 1 ) );
+    for ( size_t i = 0; i < population.size(); ++i )
+    {
+        for ( size_t j = 0; j < dimension; ++j )
+        {
+            auto const &lb = lower_bounds[j];
+            auto const &ub = upper_bounds[j];
+            population[i][j] = lb + dis( gen ) * ( ub - lb );
+        }
+    }
+
+    return population;
+}
+
+template <typename ArgumentContainer>
+void validate_initial_guess( ArgumentContainer const &initial_guess,
+                             ArgumentContainer const &lower_bounds,
+                             ArgumentContainer const &upper_bounds )
+{
+    std::ostringstream oss;
+    auto const dimension = lower_bounds.size();
+
+    if ( initial_guess.size() != dimension )
+    {
+        oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+        oss << ": The initial guess must have the same dimension as the problem,";
+        oss << " but the problem size is " << dimension << " and the initial guess has "
+            << initial_guess.size() << " elements.";
+        throw std::domain_error( oss.str() );
+    }
+
+    for ( size_t i = 0; i < dimension; ++i )
+    {
+        auto lb = lower_bounds[i];
+        auto ub = upper_bounds[i];
+
+        if ( !FloatUtils::isFinite( initial_guess[i] ) )
+        {
+            oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+            oss << ": At index " << i << ", the initial guess is " << initial_guess[i]
+                << "; make sure all elements of the initial guess are finite.";
+            throw std::domain_error( oss.str() );
+        }
+
+        if ( initial_guess[i] < lb || initial_guess[i] > ub )
+        {
+            oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+            oss << ": At index " << i << " the initial guess " << initial_guess[i]
+                << " is not in the bounds [" << lb << ", " << ub << "].";
+            throw std::domain_error( oss.str() );
+        }
+    }
+}
+
+// Single-Threaded Gradient Descent
+
+// We provide the parameters in a struct; there are too many of them and they are too unwieldy to
+// pass individually:
+template <typename ArgumentContainer> struct gradient_descent_parameters
+{
+    using Real = typename ArgumentContainer::value_type;
+    ArgumentContainer lower_bounds;
+    ArgumentContainer upper_bounds;
+
+    Real step_size = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE;
+    size_t max_iterations =
+        GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS;
+    bool maximize = false;
+    Real weight_decay = 0;
+    Real lr = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE;
+    ArgumentContainer const *initial_guess = nullptr;
+};
+
+template <typename ArgumentContainer>
+void validate_gradient_descent_parameters(
+    gradient_descent_parameters<ArgumentContainer> const &gd_params )
+{
+    std::ostringstream oss;
+    validate_bounds( gd_params.lower_bounds, gd_params.upper_bounds );
+
+    if ( FloatUtils::isNan( gd_params.step_size ) || gd_params.step_size <= 0 )
+    {
+        oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+        oss << ": step_size > 0 is required, but got step_size=" << gd_params.step_size << ".";
+        throw std::domain_error( oss.str() );
+    }
+
+    if ( gd_params.max_iterations < 1 )
+    {
+        oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+        oss << ": There must be at least one iteration.";
+        throw std::invalid_argument( oss.str() );
+    }
+
+    if ( FloatUtils::isNan( gd_params.lr ) || gd_params.lr <= 0 )
+    {
+        oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+        oss << ": lr > 0 is required, but got lr=" << gd_params.lr << ".";
+        throw std::domain_error( oss.str() );
+    }
+
+    if ( FloatUtils::isNan( gd_params.weight_decay ) || gd_params.weight_decay < 0 )
+    {
+        oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
+        oss << ": weight_decay >= 0 is required, but got weight_decay=" << gd_params.weight_decay
+            << ".";
+        throw std::domain_error( oss.str() );
+    }
+
+    if ( gd_params.initial_guess )
+    {
+        validate_initial_guess(
+            *gd_params.initial_guess, gd_params.lower_bounds, gd_params.upper_bounds );
+    }
+}
+
+template <typename ArgumentContainer, class Func, class URBG>
+ArgumentContainer gradient_descent(
+    const Func cost_function,
+    gradient_descent_parameters<ArgumentContainer> const &gd_params,
+    URBG &gen,
+    std::invoke_result_t<Func, ArgumentContainer> target_value,
+    bool *cancellation = nullptr,
+    std::vector<std::pair<ArgumentContainer, std::invoke_result_t<Func, ArgumentContainer>>>
+        *queries = nullptr,
+    std::invoke_result_t<Func, ArgumentContainer> *current_minimum_cost = nullptr )
+{
+    using ResultType = std::invoke_result_t<Func, ArgumentContainer>;
+
+    validate_gradient_descent_parameters( gd_params );
+    const size_t dimension = gd_params.lower_bounds.size();
+    auto step_size = gd_params.step_size;
+    auto max_iterations = gd_params.max_iterations;
+    auto maximize = gd_params.maximize;
+    auto lr = gd_params.lr;
+    auto weight_decay = gd_params.weight_decay;
+    auto guess =
+        random_initial_population( gd_params.lower_bounds, gd_params.upper_bounds, 1, gen );
+
+    if ( gd_params.initial_guess )
+    {
+        guess[0] = *gd_params.initial_guess;
+    }
+
+    std::vector<ArgumentContainer> candidates( dimension );
+    std::vector<double> gradient( dimension );
+    ArgumentContainer current_minimum = guess[0];
+
+    for ( size_t i = 0; i < max_iterations; ++i )
+    {
+        for ( size_t j = 0; j < dimension; ++j )
+        {
+            // Perturb the j'th coordinate and probe the cost function there
+            candidates[j] = guess[0];
+            candidates[j][j] += step_size;
+
+            // The sign must be a signed type: a size_t holding -1 would wrap around
+            double sign = ( maximize ? -1.0 : 1.0 );
+            double cost = cost_function( candidates[j] );
+            gradient[j] = sign * cost / step_size + weight_decay * guess[0][j];
+
+            if ( !FloatUtils::isNan( target_value ) && cost <= target_value )
+            {
+                guess[0] = candidates[j];
+                break;
+            }
+        }
+
+        for ( size_t j = 0; j < dimension; ++j )
+        {
+            guess[0][j] -= lr * gradient[j];
+        }
+    }
+
+    return guess[0];
+}
+
+} // namespace NLR
+#endif
diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp
index 2189b563ca..c7f3691157 100644
--- a/src/nlr/LPFormulator.cpp
+++ b/src/nlr/LPFormulator.cpp
@@ -326,7 +326,7 @@ void LPFormulator::optimizeBoundsWithLpRelaxation( const Map<unsigned, Layer *>
 
     // Clean up
     clearSolverQueue( freeSolvers );
-
+
     if ( threads )
     {
         delete[] threads;
@@ -339,42 +339,42 @@ void LPFormulator::optimizeBoundsWithLpRelaxation( const Map<unsigned, Layer *>
 
 void LPFormulator::optimizeBoundsWithInvprop( const Map<unsigned, Layer *> &layers )
 {
-
 }
 
 void LPFormulator::optimizeBoundsWithPreimageApproximation( const Map<unsigned, Layer *> &layers )
 {
     // Define EstimateVolume bind which captures layers and only receives coeffs as its input.
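+    // For intuition, the bind is equivalent to the following lambda sketch: `layers` is
+    // copied into the callable (cheap, since the Map holds Layer pointers), and the result
+    // is a unary callable of shape double( std::vector<double> ), which is the cost-function
+    // shape the optimizer call below consumes:
+    //
+    //     auto estimateVolume = [layers]( std::vector<double> coeffs ) {
+    //         return EstimateVolume( layers, coeffs );
+    //     };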
     auto EstimateVolumeBind = std::bind( EstimateVolume, layers, std::placeholders::_1 );
-
+
     // Search over coeffs in [0, 1]^depth. Enforce single-threaded optimization.
-    auto opt_params = coordinate_descent_parameters<std::vector<double>>();
+    auto opt_params = adam_optimizer_parameters<std::vector<double>>();
     unsigned depth = layers.size();
     opt_params.lower_bounds.resize( depth, 0 );
     opt_params.upper_bounds.resize( depth, 1 );
     double value_to_reach = 0;
     std::mt19937_64 rng( GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED );
-
-    // Find optimal coeffs and run final parameterised symbolic bound tightening + backward LP relaxation.
-    auto optimal_coeffs = coordinate_descent( EstimateVolumeBind, opt_params, rng, value_to_reach );
-
+
+    // Find optimal coeffs and run final parameterised symbolic bound tightening + backward LP
+    // relaxation.
+    auto optimal_coeffs = adam_optimizer( EstimateVolumeBind, opt_params, rng, value_to_reach );
+
     for ( unsigned i = 0; i < layers.size(); ++i )
         layers[i]->computeParameterisedSymbolicBounds( optimal_coeffs[i], true );
-
+
     optimizeBoundsWithLpRelaxation( layers, true, optimal_coeffs );
 }
 
-double LPFormulator::EstimateVolume( const Map<unsigned, Layer *> &layers, std::vector<double> coeffs )
+double LPFormulator::EstimateVolume( const Map<unsigned, Layer *> &layers,
+                                     std::vector<double> coeffs )
 {
     // First, run parameterised symbolic bound propagation.
     for ( unsigned i = 0; i < layers.size(); ++i )
         layers[i]->computeParameterisedSymbolicBounds( coeffs[i] );
-
+
     std::mt19937_64 rng( GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED );
     double average = 0;
     for ( unsigned i = 0; i < GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; ++i )
     {
-
         // Sample point from known bounds.
         Map<NeuronIndex, double> point;
 
         for ( auto pair : layers )
@@ -385,16 +385,17 @@ double LPFormulator::EstimateVolume( const Map<unsigned, Layer *> &layers, std::
             {
                 if ( layer->neuronEliminated( index ) )
                     continue;
-
+
                 const NeuronIndex neuron( layerIndex, index );
                 double lb = layer->getLb( index );
                 double ub = layer->getUb( index );
                 std::uniform_real_distribution<> dis( lb, ub );
-                point.insert( neuron , dis( rng ) );
+                point.insert( neuron, dis( rng ) );
             }
         }
-
-        // Compute the average sigmoid of log-sum-exp of points' difference from bounding hyperplanes.
+
+        // Compute the average sigmoid of log-sum-exp of points' difference from bounding
+        // hyperplanes.
         double sum_exp = 0;
         for ( auto pair : layers )
         {
@@ -403,17 +404,17 @@ double LPFormulator::EstimateVolume( const Map<unsigned, Layer *> &layers, std::
             {
                 if ( layer->neuronEliminated( index ) )
                     continue;
-
+
                 double margin = layer->calculateDifferenceFromSymbolic( point, index );
                 if ( FloatUtils::wellFormed( std::exp( margin ) ) )
                     sum_exp += std::exp( margin );
             }
         }
-
+
         double slse = 1 / ( 1 + ( 1 / sum_exp ) );
         average += slse / GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS;
     }
-
+
     return average;
 }
 
@@ -490,7 +491,9 @@ void LPFormulator::optimizeBoundsOfOneLayerWithLpRelaxation( const Map<unsigned, Layer *>
 
-void LPFormulator::optimizeBoundsOfNeuronsWithLpRelaxation( ThreadArgument &args, bool backward, std::vector<double> coeffs )
+void LPFormulator::optimizeBoundsOfNeuronsWithLpRelaxation( ThreadArgument &args,
+                                                            bool backward,
+                                                            std::vector<double> coeffs )
 {
     unsigned numberOfWorkers = Options::get()->getInt( Options::NUM_WORKERS );
 
@@ -581,16 +584,16 @@ void LPFormulator::optimizeBoundsOfNeuronsWithLpRelaxation( ThreadArgument &args
             threads[i].interrupt();
             threads[i].join();
         }
-
+
         // Clean up
         clearSolverQueue( freeSolvers );
-
+
         if ( threads )
         {
             delete[] threads;
            threads = NULL;
         }
-
+
         throw InfeasibleQueryException();
     }
 
@@ -666,8 +669,9 @@ void LPFormulator::tightenSingleVariableBoundsWithLPRelaxation( ThreadArgument &
     {
         LPFormulator_LOG( Stringf( "Computing upperbound..." ).ascii() );
         double ub = optimizeWithGurobi(
-                        *gurobi, MinOrMax::MAX, variableName, cutoffValue, &infeasible ) +
-                    GlobalConfiguration::LP_TIGHTENING_ROUNDING_CONSTANT;;
+                        *gurobi, MinOrMax::MAX, variableName, cutoffValue, &infeasible ) +
+                    GlobalConfiguration::LP_TIGHTENING_ROUNDING_CONSTANT;
         LPFormulator_LOG( Stringf( "Upperbound computed %f", ub ).ascii() );
 
         // Store the new bound if it is tighter
@@ -697,8 +701,8 @@ void LPFormulator::tightenSingleVariableBoundsWithLPRelaxation( ThreadArgument &
         LPFormulator_LOG( Stringf( "Computing lowerbound..." ).ascii() );
         gurobi->reset();
         double lb = optimizeWithGurobi(
-                        *gurobi, MinOrMax::MIN, variableName, cutoffValue, &infeasible ) -
-                    GlobalConfiguration::LP_TIGHTENING_ROUNDING_CONSTANT;
+                        *gurobi, MinOrMax::MIN, variableName, cutoffValue, &infeasible ) -
+                    GlobalConfiguration::LP_TIGHTENING_ROUNDING_CONSTANT;
         LPFormulator_LOG( Stringf( "Lowerbound computed: %f", lb ).ascii() );
         // Store the new bound if it is tighter
         if ( lb > currentLb )
@@ -773,7 +777,7 @@ void LPFormulator::createLPRelaxationAfter( const Map<unsigned, Layer *> &layers
             double coeff = coeffs[currentLayerIndex];
             addLayerToParameterisedModel( gurobi, currentLayer, true, coeff );
         }
-
+
         for ( const auto &nextLayer : currentLayer->getSuccessorLayers() )
         {
             if ( layerToDepth.exists( nextLayer ) )
@@ -802,7 +806,7 @@ void LPFormulator::addLayerToModel( GurobiWrapper &gurobi,
     case Layer::WEIGHTED_SUM:
         addWeightedSumLayerToLpRelaxation( gurobi, layer, createVariables );
         break;
-
+
     case Layer::ROUND:
         addRoundLayerToLpRelaxation( gurobi, layer, createVariables );
         break;
@@ -810,7 +814,7 @@ void LPFormulator::addLayerToModel( GurobiWrapper &gurobi,
     case Layer::LEAKY_RELU:
         addLeakyReluLayerToLpRelaxation( gurobi, layer, createVariables );
         break;
-
+
     case Layer::ABSOLUTE_VALUE:
         addAbsoluteValueLayerToLpRelaxation( gurobi, layer, createVariables );
         break;
@@ -822,15 +826,15 @@ void LPFormulator::addLayerToModel( GurobiWrapper &gurobi,
     case Layer::MAX:
         addMaxLayerToLpRelaxation( gurobi, layer, createVariables );
         break;
-
+
     case Layer::SIGMOID:
         addSigmoidLayerToLpRelaxation( gurobi, layer, createVariables );
         break;
-
+
     case Layer::SOFTMAX:
         addSoftmaxLayerToLpRelaxation( gurobi, layer, createVariables );
-        break;
-
+        break;
+
     case Layer::BILINEAR:
         addBilinearLayerToLpRelaxation( gurobi, layer, createVariables );
         break;
@@ -942,8 +946,8 @@ void LPFormulator::addReluLayerToLpRelaxation( GurobiWrapper &gurobi,
 }
 
 void LPFormulator::addRoundLayerToLpRelaxation( GurobiWrapper &gurobi,
-                                                 const Layer *layer,
-                                                 bool createVariables )
+                                                const Layer *layer,
+                                                bool createVariables )
 {
     for ( unsigned i = 0; i < layer->getSize(); ++i )
     {
@@ -959,7 +963,7 @@ void LPFormulator::addRoundLayerToLpRelaxation( GurobiWrapper &gurobi,
         {
             // If the source neuron has been eliminated, this neuron is constant
             double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron );
-            double targetValue = FloatUtils::round(sourceValue);
+            double targetValue = FloatUtils::round( sourceValue );
 
             gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue );
 
@@ -973,11 +977,11 @@ void LPFormulator::addRoundLayerToLpRelaxation( GurobiWrapper &gurobi,
         if ( createVariables && !gurobi.containsVariable( sourceName ) )
             gurobi.addVariable( sourceName, sourceLb, sourceUb );
 
-        double ub = std::max( FloatUtils::round( sourceUb ), layer->getUb( i ) );
-        double lb = std::min( FloatUtils::round( sourceLb ), layer->getLb( i ) );
-
+        double ub = std::min( FloatUtils::round( sourceUb ), layer->getUb( i ) );
+        double lb = std::max(
FloatUtils::round( sourceLb ), layer->getLb( i ) ); + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); - + // If u = l: y = round(u) if ( FloatUtils::areEqual( sourceUb, sourceLb ) ) { @@ -985,29 +989,29 @@ void LPFormulator::addRoundLayerToLpRelaxation( GurobiWrapper &gurobi, terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); gurobi.addEqConstraint( terms, ub ); } - + else { - List terms; - // y <= x + 0.5, i.e. y - x <= 0.5 - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addLeqConstraint( terms, 0.5 ); - - // y >= x - 0.5, i.e. y - x >= -0.5 - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, -0.5 ); + List terms; + // y <= x + 0.5, i.e. y - x <= 0.5 + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, 0.5 ); + + // y >= x - 0.5, i.e. y - x >= -0.5 + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, -0.5 ); } } } } void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables ) + const Layer *layer, + bool createVariables ) { for ( unsigned i = 0; i < layer->getSize(); ++i ) { @@ -1036,10 +1040,6 @@ void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, String sourceName = Stringf( "x%u", sourceVariable ); if ( createVariables && !gurobi.containsVariable( sourceName ) ) gurobi.addVariable( sourceName, sourceLb, sourceUb ); - - double ub = std::min( FloatUtils::max( -sourceLb, sourceUb ), layer->getUb( i ) ); - - gurobi.addVariable(Stringf( "x%u", targetVariable ), 0, ub ); if ( !FloatUtils::isNegative( sourceLb ) ) { @@ -1047,6 +1047,10 @@ void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, if ( sourceLb < 0 ) sourceLb = 0; + double ub = std::min( sourceUb, layer->getUb( i ) ); + double lb = std::max( sourceLb, layer->getLb( i ) ); + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); + List terms; terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); @@ -1054,6 +1058,10 @@ void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, } else if ( !FloatUtils::isPositive( sourceUb ) ) { + double ub = std::min( -sourceLb, layer->getUb( i ) ); + double lb = std::max( -sourceUb, layer->getLb( i ) ); + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); + // The AbsoluteValue is inactive, y = -x, i.e. y + x = 0 List terms; terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); @@ -1062,6 +1070,9 @@ void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, } else { + double ub = std::min( std::max( -sourceLb, sourceUb ), layer->getUb( i ) ); + double lb = std::max( 0.0, layer->getLb( i ) ); + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); /* The phase of this AbsoluteValue is not yet fixed, 0 <= y <= max(-lb, ub). 
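+               Soundness of the concrete cap chosen above: for y = |x| with x in
+               [lb, ub] and lb < 0 < ub, the largest attainable value is max( -lb, ub ),
+               reached at one of the interval's endpoints, so clipping the variable's
+               upper bound against it loses no solutions.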
*/ @@ -1081,8 +1092,8 @@ void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, } void LPFormulator::addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables ) + const Layer *layer, + bool createVariables ) { for ( unsigned i = 0; i < layer->getSize(); ++i ) { @@ -1098,7 +1109,7 @@ void LPFormulator::addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, { // If the source neuron has been eliminated, this neuron is constant double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - double targetValue = SigmoidConstraint::sigmoid(sourceValue); + double targetValue = SigmoidConstraint::sigmoid( sourceValue ); gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); @@ -1111,12 +1122,15 @@ void LPFormulator::addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, String sourceName = Stringf( "x%u", sourceVariable ); if ( createVariables && !gurobi.containsVariable( sourceName ) ) gurobi.addVariable( sourceName, sourceLb, sourceUb ); - - double ub = std::min( SigmoidConstraint::sigmoid( sourceUb ), layer->getUb( i ) ); - double lb = std::max( SigmoidConstraint::sigmoid( sourceLb ), layer->getLb( i ) ); - + + double sourceUbSigmoid = SigmoidConstraint::sigmoid( sourceUb ); + double sourceLbSigmoid = SigmoidConstraint::sigmoid( sourceLb ); + + double ub = std::min( sourceUbSigmoid, layer->getUb( i ) ); + double lb = std::max( sourceLbSigmoid, layer->getLb( i ) ); + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); - + // If u = l: y = sigmoid(u) if ( FloatUtils::areEqual( sourceUb, sourceLb ) ) { @@ -1125,49 +1139,57 @@ void LPFormulator::addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); gurobi.addEqConstraint( terms, ub ); } - + else { List terms; double lambda = ( ub - lb ) / ( sourceUb - sourceLb ); double lambdaPrime = std::min( SigmoidConstraint::sigmoidDerivative( sourceLb ), - SigmoidConstraint::sigmoidDerivative( sourceUb ) ); - + SigmoidConstraint::sigmoidDerivative( sourceUb ) ); + // update lower bound if ( FloatUtils::isPositive( sourceLb ) ) - { - // y >= lambda * (x - l), i.e. y - lambda * x >= lambda * l + { + // y >= lambda * (x - l) + sigmoid(lb), i.e. y - lambda * x >= sigmoid(lb) - + // lambda * l terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -lambda, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, sourceLb * lambda ); + terms.append( + GurobiWrapper::Term( -lambda, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, sourceLbSigmoid - sourceLb * lambda ); } - + else { - // y >= lambda' * (x - l), i.e. y - lambda' * x >= lambda' * l + // y >= lambda' * (x - l) + sigmoid(lb), i.e. y - lambda' * x >= sigmoid(lb) - + // lambda' * l terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -lambdaPrime, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, sourceLb * lambdaPrime ); + terms.append( + GurobiWrapper::Term( -lambdaPrime, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, sourceLbSigmoid - sourceLb * lambdaPrime ); } - + // update upper bound if ( !FloatUtils::isPositive( sourceUb ) ) { - // y <= lambda * (x - u), i.e. y - lambda * x <= lambda * u + // y <= lambda * (x - u) + sigmoid(ub), i.e. 
y - lambda * x <= sigmoid(ub) - + // lambda * u terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -lambda, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addLeqConstraint( terms, sourceUb * lambda ); + terms.append( + GurobiWrapper::Term( -lambda, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, sourceUbSigmoid - sourceUb * lambda ); } else { - // y <= lambda' * (x - u), i.e. y - lambda' * x <= lambda' * u + // y <= lambda' * (x - u) + sigmoid(ub), i.e. y - lambda' * x <= sigmoid(ub) - + // lambda' * u terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -lambdaPrime, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addLeqConstraint( terms, sourceUb * lambdaPrime ); + terms.append( + GurobiWrapper::Term( -lambdaPrime, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, sourceUbSigmoid - sourceUb * lambdaPrime ); } } } @@ -1338,14 +1360,14 @@ void LPFormulator::addMaxLayerToLpRelaxation( GurobiWrapper &gurobi, } void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables ) + const Layer *layer, + bool createVariables ) { for ( unsigned i = 0; i < layer->getSize(); ++i ) { if ( layer->neuronEliminated( i ) ) continue; - + Set handledInputNeurons; List sources = layer->getActivationSources( i ); @@ -1364,7 +1386,7 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, String sourceName = Stringf( "x%u", sourceVariable ); if ( createVariables && !gurobi.containsVariable( sourceName ) ) gurobi.addVariable( sourceName, sourceLb, sourceUb ); - + sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); sourceMids.append( ( sourceLb + sourceUb ) / 2 ); @@ -1385,12 +1407,18 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, } } - double ub = std::min( DeepPolySoftmaxElement::linearUpperBound( sourceLbs, sourceUbs, index ), layer->getUb( i ) ); - double lb = std::max( DeepPolySoftmaxElement::linearLowerBound( sourceLbs, sourceUbs, index ), layer->getLb( i ) ); - + double ub = + std::min( DeepPolySoftmaxElement::linearUpperBound( sourceLbs, sourceUbs, index ), + layer->getUb( i ) ); + double lb = + std::max( DeepPolySoftmaxElement::linearLowerBound( sourceLbs, sourceUbs, index ), + layer->getLb( i ) ); + targetLbs[index] = lb; + targetUbs[index] = ub; + unsigned targetVariable = layer->neuronToVariable( i ); gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); - + double bias; SoftmaxBoundType boundType = Options::get()->getSoftmaxBoundType(); @@ -1417,15 +1445,17 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, { terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = DeepPolySoftmaxElement::LSELowerBound( sourceMids, sourceLbs, sourceUbs, index ); + bias = DeepPolySoftmaxElement::LSELowerBound( + sourceMids, sourceLbs, sourceUbs, index ); for ( const auto &source : sources ) { const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); unsigned sourceNeuron = source._neuron; unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dldj = - DeepPolySoftmaxElement::dLSELowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - terms.append( GurobiWrapper::Term( -dldj, Stringf( 
"x%u", sourceVariable ) ) ); + double dldj = DeepPolySoftmaxElement::dLSELowerBound( + sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + terms.append( + GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); bias -= dldj * sourceMids[inputIndex]; ++inputIndex; } @@ -1435,15 +1465,17 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, { terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = DeepPolySoftmaxElement::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, index ); + bias = DeepPolySoftmaxElement::LSELowerBound2( + sourceMids, sourceLbs, sourceUbs, index ); for ( const auto &source : sources ) { const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); unsigned sourceNeuron = source._neuron; unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dldj = - DeepPolySoftmaxElement::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); + double dldj = DeepPolySoftmaxElement::dLSELowerBound2( + sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + terms.append( + GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); bias -= dldj * sourceMids[inputIndex]; ++inputIndex; } @@ -1452,15 +1484,16 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = DeepPolySoftmaxElement::LSEUpperBound( sourceMids, targetLbs, targetUbs, index ); + bias = DeepPolySoftmaxElement::LSEUpperBound( + sourceMids, targetLbs, targetUbs, index ); inputIndex = 0; for ( const auto &source : sources ) { const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); unsigned sourceNeuron = source._neuron; unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dudj = - DeepPolySoftmaxElement::dLSEUpperbound( sourceMids, targetLbs, targetUbs, index, inputIndex ); + double dudj = DeepPolySoftmaxElement::dLSEUpperbound( + sourceMids, targetLbs, targetUbs, index, inputIndex ); terms.append( GurobiWrapper::Term( -dudj, Stringf( "x%u", sourceVariable ) ) ); bias -= dudj * sourceMids[inputIndex]; ++inputIndex; @@ -1471,32 +1504,34 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, { terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = DeepPolySoftmaxElement::ERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); + bias = + DeepPolySoftmaxElement::ERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); unsigned inputIndex = 0; for ( const auto &source : sources ) { const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); unsigned sourceNeuron = source._neuron; unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dldj = - DeepPolySoftmaxElement::dERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + double dldj = DeepPolySoftmaxElement::dERLowerBound( + sourceMids, sourceLbs, sourceUbs, index, inputIndex ); terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); - bias -= dldj * sourceMids[inputIndex]; + bias -= dldj * sourceMids[inputIndex]; ++inputIndex; } gurobi.addGeqConstraint( terms, bias ); terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = DeepPolySoftmaxElement::ERUpperBound( sourceMids, targetLbs, targetUbs, index ); + bias = + 
DeepPolySoftmaxElement::ERUpperBound( sourceMids, targetLbs, targetUbs, index ); inputIndex = 0; for ( const auto &source : sources ) { const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); unsigned sourceNeuron = source._neuron; unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dudj = - DeepPolySoftmaxElement::dERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex ); + double dudj = DeepPolySoftmaxElement::dERUpperBound( + sourceMids, targetLbs, targetUbs, index, inputIndex ); terms.append( GurobiWrapper::Term( -dudj, Stringf( "x%u", sourceVariable ) ) ); bias -= dudj * sourceMids[inputIndex]; ++inputIndex; @@ -1507,8 +1542,9 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, } } -void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, - bool createVariables ) +void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables ) { for ( unsigned i = 0; i < layer->getSize(); ++i ) { @@ -1519,12 +1555,11 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, const List sources = layer->getActivationSources( i ); const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - + Vector sourceLbs; Vector sourceUbs; Vector sourceValues; Vector sourceNeurons; - unsigned counter = 0; bool allConstant = true; for ( const auto &sourceIndex : sources ) { @@ -1532,15 +1567,14 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, const double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); String sourceName = Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeuron ) ); - + sourceNeurons.append( sourceNeuron ); - sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); - sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); - + sourceLbs.append( sourceLb ); + sourceUbs.append( sourceUb ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) gurobi.addVariable( sourceName, sourceLb, sourceUb ); - ++counter; - + if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) { allConstant = false; @@ -1550,9 +1584,8 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, const double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); sourceValues.append( sourceValue ); } - ++counter; } - + if ( allConstant ) { // If the both source neurons have been eliminated, this neuron is constant @@ -1560,7 +1593,7 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, const gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); continue; } - + double lb = FloatUtils::infinity(); double ub = FloatUtils::negativeInfinity(); List values = { sourceLbs[0] * sourceLbs[1], @@ -1574,22 +1607,30 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, const if ( v > ub ) ub = v; } - + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); - + // Lower bound: out >= l_y * x + l_x * y - l_x * l_y List terms; terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -sourceLbs[1], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); - terms.append( GurobiWrapper::Term( -sourceLbs[0], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); + terms.append( 
GurobiWrapper::Term( + -sourceLbs[1], + Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); + terms.append( GurobiWrapper::Term( + -sourceLbs[0], + Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); gurobi.addGeqConstraint( terms, -sourceLbs[0] * sourceLbs[1] ); - + // Upper bound: out <= u_y * x + l_x * y - l_x * u_y terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -sourceUbs[1], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); - terms.append( GurobiWrapper::Term( -sourceLbs[0], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); + terms.append( GurobiWrapper::Term( + -sourceUbs[1], + Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); + terms.append( GurobiWrapper::Term( + -sourceLbs[0], + Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); gurobi.addLeqConstraint( terms, -sourceLbs[0] * sourceUbs[1] ); } } @@ -1755,7 +1796,6 @@ void LPFormulator::addLayerToParameterisedModel( GurobiWrapper &gurobi, { switch ( layer->getLayerType() ) { - case Layer::RELU: addReluLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); break; @@ -1763,11 +1803,11 @@ void LPFormulator::addLayerToParameterisedModel( GurobiWrapper &gurobi, case Layer::LEAKY_RELU: addLeakyReluLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); break; - + case Layer::SIGN: addSignLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); break; - + default: addLayerToModel( gurobi, layer, createVariables ); break; @@ -1844,7 +1884,8 @@ void LPFormulator::addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurob terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); gurobi.addGeqConstraint( terms, 0 ); - // y >= coeff * x, i.e. y - coeff * x >= 0 (varies continuously between y >= 0 and y >= alpha * x). + // y >= coeff * x, i.e. y - coeff * x >= 0 (varies continuously between y >= 0 and + // y >= alpha * x). terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( -coeff, Stringf( "x%u", sourceVariable ) ) ); @@ -1928,23 +1969,26 @@ void LPFormulator::addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurob /* 2 - y <= ----- * (1 - coeff) x + 1 (varies continuously between y <= 1 and y <= -2 / l * x + 1). + y <= ----- * (1 - coeff) x + 1 (varies continuously between y <= 1 and y <= -2 / l * + x + 1). - l */ List terms; terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( 2.0 / sourceLb * (1 - coeff), Stringf( "x%u", sourceVariable ) ) ); + terms.append( GurobiWrapper::Term( 2.0 / sourceLb * ( 1 - coeff ), + Stringf( "x%u", sourceVariable ) ) ); gurobi.addLeqConstraint( terms, 1 ); /* 2 - y >= ----- * (1 - coeff) x - 1 (varies continuously between y >= -1 and y >= 2 / u * x - 1). + y >= ----- * (1 - coeff) x - 1 (varies continuously between y >= -1 and y >= 2 / u * + x - 1). 
u */ terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( - GurobiWrapper::Term( -2.0 / sourceUb * (1 - coeff), Stringf( "x%u", sourceVariable ) ) ); + terms.append( GurobiWrapper::Term( -2.0 / sourceUb * ( 1 - coeff ), + Stringf( "x%u", sourceVariable ) ) ); gurobi.addGeqConstraint( terms, -1 ); } } @@ -2002,7 +2046,7 @@ void LPFormulator::addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper & // The LeakyReLU is inactive, y = alpha * x List terms; terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( slope, Stringf( "x%u", sourceVariable ) ) ); + terms.append( GurobiWrapper::Term( -slope, Stringf( "x%u", sourceVariable ) ) ); gurobi.addEqConstraint( terms, 0 ); } else @@ -2014,15 +2058,17 @@ void LPFormulator::addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper & /* The phase of this LeakyReLU is not yet fixed. For y = LeakyReLU(x), we add the following triangular relaxation: - 1. y >= ((1 - alpha) * coeff + alpha) * x (varies continuously between y >= alpha * x and y >= x). + 1. y >= ((1 - alpha) * coeff + alpha) * x (varies continuously between y >= + alpha * x and y >= x). 2. y >= x 3. y is below the line the crosses (x.lb,0) and (x.ub,x.ub) */ - // y >= ((alpha+1) * coeff - alpha) * x + // y >= ((1 - alpha) * coeff + alpha) * x List terms; terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -slope - ( 1 - slope ) * coeff, Stringf( "x%u", sourceVariable ) ) ); + terms.append( GurobiWrapper::Term( -slope - ( 1 - slope ) * coeff, + Stringf( "x%u", sourceVariable ) ) ); gurobi.addGeqConstraint( terms, 0 ); // y >= x, i.e. y - x >= 0 diff --git a/src/nlr/LPFormulator.h b/src/nlr/LPFormulator.h index 41f5ac1a3f..f1a5849004 100644 --- a/src/nlr/LPFormulator.h +++ b/src/nlr/LPFormulator.h @@ -16,7 +16,7 @@ #ifndef __LPFormulator_h__ #define __LPFormulator_h__ -#include "CoordinateDescent.h" +#include "AdamOptimizer.h" #include "GurobiWrapper.h" #include "LayerOwner.h" #include "Map.h" @@ -110,47 +110,60 @@ class LPFormulator : public ParallelSolver void addMaxLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ); - + void addRoundLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ); - - void - addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ); - - void - addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ); - - void - addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ); - - void - addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ); - + + void addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables ); + + void addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables ); + + void addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables ); + + void addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables ); + void addWeightedSumLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ); - void optimizeBoundsOfNeuronsWithLpRelaxation( ThreadArgument &args, bool backward, std::vector coeffs = {} ); - - - // Create LP relaxations depending on an external 
parameter. + void optimizeBoundsOfNeuronsWithLpRelaxation( ThreadArgument &args, + bool backward, + std::vector coeffs = {} ); + + + // Create LP relaxations depending on an external parameter. void addLayerToParameterisedModel( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables, - double coeff ); - - void - addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); - - void - addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); - - void - addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); - - + const Layer *layer, + bool createVariables, + double coeff ); + + void addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ); + + void addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ); + + void addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ); + + // Estimate Volume of parameterised LP relaxations. - static double EstimateVolume( const Map &layers, std::vector coeffs ); + static double EstimateVolume( const Map &layers, + std::vector coeffs ); /* diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index 644c056f8d..b53777488e 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -417,7 +417,7 @@ void Layer::computeSimulations() for ( unsigned j = 0; j < simulationSize; ++j ) { _simulations[i][j] = FloatUtils::negativeInfinity(); - + Vector inputs; Vector outputs; unsigned outputIndex = 0; @@ -433,7 +433,7 @@ void Layer::computeSimulations() inputs.append( value ); ++index; } - + SoftmaxConstraint::softmax( inputs, outputs ); _simulations[i][j] = outputs[outputIndex]; } @@ -724,27 +724,27 @@ void Layer::computeIntervalArithmeticBounds() case SIGN: computeIntervalArithmeticBoundsForSign(); break; - + case ROUND: computeIntervalArithmeticBoundsForRound(); break; - + case LEAKY_RELU: computeIntervalArithmeticBoundsForLeakyRelu(); break; - + case SIGMOID: computeIntervalArithmeticBoundsForSigmoid(); break; - + case MAX: computeIntervalArithmeticBoundsForMax(); break; - + case SOFTMAX: computeIntervalArithmeticBoundsForSoftmax(); break; - + case BILINEAR: computeIntervalArithmeticBoundsForBilinear(); break; @@ -925,7 +925,7 @@ void Layer::computeIntervalArithmeticBoundsForSign() double lb = sourceLayer->getLb( sourceIndex._neuron ); double ub = sourceLayer->getUb( sourceIndex._neuron ); - + double new_lb; double new_ub; @@ -944,7 +944,7 @@ void Layer::computeIntervalArithmeticBoundsForSign() new_lb = -1; new_ub = 1; } - + /* We now have the tightest bounds we can for the relu variable. 
If they are tigheter than what was previously @@ -1041,10 +1041,10 @@ void Layer::computeIntervalArithmeticBoundsForSigmoid() double lb = sourceLayer->getLb( sourceIndex._neuron ); double ub = sourceLayer->getUb( sourceIndex._neuron ); - + double lbSigmoid = SigmoidConstraint::sigmoid( lb ); double ubSigmoid = SigmoidConstraint::sigmoid( ub ); - + if ( _lb[i] < lbSigmoid ) { @@ -1073,10 +1073,10 @@ void Layer::computeIntervalArithmeticBoundsForRound() double lb = sourceLayer->getLb( sourceIndex._neuron ); double ub = sourceLayer->getUb( sourceIndex._neuron ); - + double lbRound = FloatUtils::round( lb ); double ubRound = FloatUtils::round( ub ); - + if ( _lb[i] < lbRound ) { @@ -1116,7 +1116,7 @@ void Layer::computeIntervalArithmeticBoundsForMax() unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); - + sourceLbs[sourceIndex] = sourceLb; sourceUbs[sourceIndex] = sourceUb; @@ -1155,7 +1155,7 @@ void Layer::computeIntervalArithmeticBoundsForMax() _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); } - + if ( _ub[i] > sourceUbs[indexOfMaxLowerBound] ) { _ub[i] = sourceUbs[indexOfMaxLowerBound]; @@ -1173,7 +1173,7 @@ void Layer::computeIntervalArithmeticBoundsForMax() _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); } - + if ( _ub[i] > maxUpperBound ) { _ub[i] = maxUpperBound; @@ -1195,9 +1195,9 @@ void Layer::computeIntervalArithmeticBoundsForSoftmax() ASSERT( _neuronToActivationSources.exists( i ) ); List sources = getActivationSources( i ); const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - + ASSERT( sourceLayer->getSize() == _size ); - + Vector sourceLbs; Vector sourceUbs; for ( const auto &sourceIndex : sources ) @@ -1205,7 +1205,7 @@ void Layer::computeIntervalArithmeticBoundsForSoftmax() unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); - + sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); } @@ -1250,9 +1250,9 @@ void Layer::computeIntervalArithmeticBoundsForBilinear() ASSERT( _neuronToActivationSources.exists( i ) ); List sources = getActivationSources( i ); ASSERT( sources.size() == 2 ); - + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - + Vector sourceLbs; Vector sourceUbs; Vector sourceValues; @@ -1262,10 +1262,10 @@ void Layer::computeIntervalArithmeticBoundsForBilinear() unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); - - sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); - sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); - + + sourceLbs.append( sourceLb ); + sourceUbs.append( sourceUb ); + if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) { allConstant = false; @@ -1276,19 +1276,19 @@ void Layer::computeIntervalArithmeticBoundsForBilinear() sourceValues.append( sourceValue ); } } - + if ( allConstant ) { // If the both source neurons have been eliminated, this neuron is constant double value = sourceValues[0] * sourceValues[1]; - + if ( _lb[i] < value ) { _lb[i] = value; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], 
_lb[i], Tightening::LB ) ); } - + if ( _ub[i] > value ) { _ub[i] = value; @@ -1297,7 +1297,7 @@ void Layer::computeIntervalArithmeticBoundsForBilinear() } continue; } - + double lb = FloatUtils::infinity(); double ub = FloatUtils::negativeInfinity(); List values = { sourceLbs[0] * sourceLbs[1], @@ -1311,8 +1311,8 @@ void Layer::computeIntervalArithmeticBoundsForBilinear() if ( v > ub ) ub = v; } - - + + if ( _lb[i] < lb ) { _lb[i] = lb; @@ -1352,31 +1352,31 @@ void Layer::computeSymbolicBounds() case ABSOLUTE_VALUE: computeSymbolicBoundsForAbsoluteValue(); break; - + case LEAKY_RELU: computeSymbolicBoundsForLeakyRelu(); break; - + case ROUND: computeSymbolicBoundsForRound(); break; - + case SIGMOID: computeSymbolicBoundsForSigmoid(); break; - + case MAX: computeSymbolicBoundsForMax(); break; - + case SOFTMAX: computeSymbolicBoundsForSoftmax(); break; - + case BILINEAR: computeSymbolicBoundsForBilinear(); break; - + default: computeSymbolicBoundsDefault(); break; @@ -1554,19 +1554,19 @@ void Layer::computeSymbolicBoundsForRelu() { for ( unsigned j = 0; j < _inputLayerSize; ++j ) _symbolicLb[j * _size + i] = 0; - + _symbolicLowerBias[i] = 0; } else - { + { for ( unsigned j = 0; j < _inputLayerSize; ++j ) _symbolicLb[j * _size + i] = _symbolicLb[j * _size + i] * _symbolicUbOfLb[i] / ( _symbolicUbOfLb[i] - _symbolicLbOfLb[i] ); - + _symbolicLowerBias[i] = _symbolicLowerBias[i] * _symbolicUbOfLb[i] / ( _symbolicUbOfLb[i] - _symbolicLbOfLb[i] ); } - + _symbolicLbOfLb[i] = 0; } else @@ -1698,7 +1698,7 @@ void Layer::computeSymbolicBoundsForSign() { PhaseStatus upperSignPhase = PHASE_NOT_FIXED; PhaseStatus lowerSignPhase = PHASE_NOT_FIXED; - + // If we got here, we know that lbLb < 0 and ubUb // > 0 @@ -1712,7 +1712,7 @@ void Layer::computeSymbolicBoundsForSign() _symbolicUb[j * _size + i] = 0; _symbolicUpperBias[i] = 1; - + upperSignPhase = SIGN_PHASE_POSITIVE; } else @@ -1757,8 +1757,8 @@ void Layer::computeSymbolicBoundsForSign() _symbolicLowerBias[i] *= factor; _symbolicLowerBias[i] -= 1; } - - if (upperSignPhase == PHASE_NOT_FIXED) + + if ( upperSignPhase == PHASE_NOT_FIXED ) { _symbolicUbOfUb[i] = 1; _symbolicLbOfUb[i] = -1; @@ -1768,8 +1768,8 @@ void Layer::computeSymbolicBoundsForSign() _symbolicUbOfUb[i] = 1; _symbolicLbOfUb[i] = 1; } - - if (lowerSignPhase == PHASE_NOT_FIXED) + + if ( lowerSignPhase == PHASE_NOT_FIXED ) { _symbolicUbOfLb[i] = 1; _symbolicLbOfLb[i] = -1; @@ -1961,7 +1961,7 @@ void Layer::computeSymbolicBoundsForAbsoluteValue() void Layer::computeSymbolicBoundsForLeakyRelu() { ASSERT( _alpha > 0 && _alpha < 1 ); - + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); @@ -2052,11 +2052,11 @@ void Layer::computeSymbolicBoundsForLeakyRelu() { _symbolicUb[j * _size + i] *= weight; } - + // Do the same for the bias, and then adjust _symbolicUpperBias[i] *= weight; _symbolicUpperBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; - + // For the lower bound, in general, x_f >= lambda * x_b, where // 0 <= lambda <= 1, would be a sound lower bound. 
We @@ -2068,7 +2068,7 @@ void Layer::computeSymbolicBoundsForLeakyRelu() // lambda = 1 // Symbolic lower bound: x_f >= x_b // Concrete lower bound: x_f >= sourceLb - + // Lower bounds are passed as is } else @@ -2076,12 +2076,12 @@ void Layer::computeSymbolicBoundsForLeakyRelu() // lambda = 1 // Symbolic lower bound: x_f >= _alpha x_b // Concrete lower bound: x_f >= 0 - + for ( unsigned j = 0; j < _inputLayerSize; ++j ) { _symbolicLb[j * _size + i] *= _alpha; } - + _symbolicLowerBias[i] *= _alpha; } } @@ -2091,7 +2091,7 @@ void Layer::computeSymbolicBoundsForLeakyRelu() { _symbolicLb[j * _size + i] *= weight; } - + // Do the same for the bias, and then adjust _symbolicLowerBias[i] *= weight; _symbolicLowerBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; @@ -2106,11 +2106,11 @@ void Layer::computeSymbolicBoundsForLeakyRelu() { _symbolicUb[j * _size + i] *= _alpha; } - + _symbolicUpperBias[i] *= _alpha; } } - + /* We now have the symbolic representation for the current layer. Next, we compute new lower and upper bounds for @@ -2126,9 +2126,9 @@ void Layer::computeSymbolicBoundsForLeakyRelu() { double inputLb = _layerOwner->getLayer( 0 )->getLb( j ); double inputUb = _layerOwner->getLayer( 0 )->getUb( j ); - + double entry = _symbolicLb[j * _size + i]; - + if ( entry >= 0 ) { _symbolicLbOfLb[i] += ( entry * inputLb ); @@ -2139,9 +2139,9 @@ void Layer::computeSymbolicBoundsForLeakyRelu() _symbolicLbOfLb[i] += ( entry * inputUb ); _symbolicUbOfLb[i] += ( entry * inputLb ); } - + entry = _symbolicUb[j * _size + i]; - + if ( entry >= 0 ) { _symbolicLbOfUb[i] += ( entry * inputLb ); @@ -2179,7 +2179,7 @@ void Layer::computeSymbolicBoundsForLeakyRelu() _symbolicUpperBias[i] *= _alpha; } } - + if ( _symbolicUbOfUb[i] > sourceUb ) _symbolicUbOfUb[i] = sourceUb; if ( _symbolicLbOfLb[i] < _alpha * sourceLb ) @@ -2233,7 +2233,7 @@ void Layer::computeSymbolicBoundsForSigmoid() ASSERT( _neuronToActivationSources.exists( i ) ); NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); - + /* A Sigmoid initially "inherits" the symbolic bounds computed for its input variable @@ -2259,11 +2259,11 @@ void Layer::computeSymbolicBoundsForSigmoid() _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); - + // Bounds of lb, ub are the Sigmoids of source lb, ub double sourceUbSigmoid = SigmoidConstraint::sigmoid( sourceUb ); double sourceLbSigmoid = SigmoidConstraint::sigmoid( sourceLb ); - + // Case when the Sigmoid constraint is fixed if ( FloatUtils::areEqual( FloatUtils::round( sourceUb ), FloatUtils::round( sourceLb ) ) ) { @@ -2272,16 +2272,16 @@ void Layer::computeSymbolicBoundsForSigmoid() _symbolicLb[j * _size + i] = 0; _symbolicUb[j * _size + i] = 0; } - + _symbolicLbOfUb[i] = sourceUbSigmoid; _symbolicUbOfUb[i] = sourceUbSigmoid; _symbolicLbOfLb[i] = sourceLbSigmoid; _symbolicUbOfLb[i] = sourceLbSigmoid; - + _symbolicUpperBias[i] = sourceUbSigmoid; _symbolicLowerBias[i] = sourceLbSigmoid; } - + // Sigmoid not fixed else { @@ -2296,7 +2296,7 @@ void Layer::computeSymbolicBoundsForSigmoid() { _symbolicLb[j * _size + i] *= lambda; } - + // Do the same for the bias, and then adjust _symbolicLowerBias[i] *= lambda; _symbolicLowerBias[i] += sourceLbSigmoid - lambda * sourceLb; @@ -2307,7 +2307,7 @@ void Layer::computeSymbolicBoundsForSigmoid() { 
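+                // Why lambdaPrime is sound here: sigmoid' is unimodal with its peak at
+                // x = 0, so min( sigmoid'(lb), sigmoid'(ub) ) is the minimum derivative
+                // over the whole interval [lb, ub]. The line through ( lb, sigmoid(lb) )
+                // with that slope therefore stays below the curve on [lb, ub], giving a
+                // valid symbolic lower bound.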
_symbolicLb[j * _size + i] *= lambdaPrime; } - + // Do the same for the bias, and then adjust _symbolicLowerBias[i] *= lambdaPrime; _symbolicLowerBias[i] += sourceLbSigmoid - lambdaPrime * sourceLb; @@ -2320,7 +2320,7 @@ void Layer::computeSymbolicBoundsForSigmoid() { _symbolicUb[j * _size + i] *= lambda; } - + // Do the same for the bias, and then adjust _symbolicUpperBias[i] *= lambda; _symbolicUpperBias[i] += sourceUbSigmoid - lambda * sourceUb; @@ -2331,13 +2331,13 @@ void Layer::computeSymbolicBoundsForSigmoid() { _symbolicUb[j * _size + i] *= lambdaPrime; } - + // Do the same for the bias, and then adjust _symbolicUpperBias[i] *= lambdaPrime; _symbolicUpperBias[i] += sourceUbSigmoid - lambdaPrime * sourceUb; } - - + + /* We now have the symbolic representation for the current layer. Next, we compute new lower and upper bounds for @@ -2353,9 +2353,9 @@ void Layer::computeSymbolicBoundsForSigmoid() { double inputLb = _layerOwner->getLayer( 0 )->getLb( j ); double inputUb = _layerOwner->getLayer( 0 )->getUb( j ); - + double entry = _symbolicLb[j * _size + i]; - + if ( entry >= 0 ) { _symbolicLbOfLb[i] += ( entry * inputLb ); @@ -2366,9 +2366,9 @@ void Layer::computeSymbolicBoundsForSigmoid() _symbolicLbOfLb[i] += ( entry * inputUb ); _symbolicUbOfLb[i] += ( entry * inputLb ); } - + entry = _symbolicUb[j * _size + i]; - + if ( entry >= 0 ) { _symbolicLbOfUb[i] += ( entry * inputLb ); @@ -2381,12 +2381,12 @@ void Layer::computeSymbolicBoundsForSigmoid() } } } - + if ( _symbolicLbOfLb[i] < -1 ) _symbolicLbOfLb[i] = -1; if ( _symbolicUbOfUb[i] > 1 ) _symbolicUbOfUb[i] = 1; - + /* We now have the tightest bounds we can for the relu variable. If they are tigheter than what was previously @@ -2435,7 +2435,7 @@ void Layer::computeSymbolicBoundsForRound() ASSERT( _neuronToActivationSources.exists( i ) ); NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); - + /* A Round initially "inherits" the symbolic bounds computed for its input variable @@ -2461,42 +2461,42 @@ void Layer::computeSymbolicBoundsForRound() _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); - - + + // Bounds of lb, ub are the rounded values of source lb, ub double sourceUbRound = FloatUtils::round( sourceUb ); double sourceLbRound = FloatUtils::round( sourceLb ); - + _symbolicLbOfUb[i] = sourceUbRound; _symbolicUbOfUb[i] = sourceUbRound; _symbolicLbOfLb[i] = sourceLbRound; _symbolicUbOfLb[i] = sourceLbRound; - - + + // Case when the Round constraint is fixed if ( FloatUtils::areEqual( FloatUtils::round( sourceUb ), FloatUtils::round( sourceLb ) ) ) { _symbolicUb[i] = 0; _symbolicUpperBias[i] = sourceUbRound; - + _symbolicLb[i] = 0; _symbolicLowerBias[i] = sourceLbRound; } - + // Round not fixed else { // Symbolic upper bound: x_f <= x_b + 0.5 // Concrete upper bound: x_f <= round(ub_b) - + _symbolicUpperBias[i] += 0.5; // Symbolic lower bound: x_f >= x_b - 0.5 // Concrete lower bound: x_f >= round(lb_b) - + _symbolicLowerBias[i] -= 0.5; } - + /* We now have the tightest bounds we can for the relu variable. 
If they are tigheter than what was previously @@ -2545,7 +2545,7 @@ void Layer::computeSymbolicBoundsForMax() ASSERT( _neuronToActivationSources.exists( i ) ); List sources = getActivationSources( i ); const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - + unsigned sourceLayerSize = sourceLayer->getSize(); const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); @@ -2561,7 +2561,7 @@ void Layer::computeSymbolicBoundsForMax() unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); - + sourceLbs[sourceIndex] = sourceLb; sourceUbs[sourceIndex] = sourceUb; @@ -2596,15 +2596,17 @@ void Layer::computeSymbolicBoundsForMax() // Concrete bound: lb_b <= x_f <= ub_b for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - _symbolicLb[j * _size + i] = - sourceSymbolicLb[j * sourceLayerSize + indexOfMaxLowerBound._neuron]; - _symbolicUb[j * _size + i] = - sourceSymbolicUb[j * sourceLayerSize + indexOfMaxLowerBound._neuron]; - } - _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[indexOfMaxLowerBound._neuron]; - _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[indexOfMaxLowerBound._neuron]; - - + _symbolicLb[j * _size + i] = + sourceSymbolicLb[j * sourceLayerSize + indexOfMaxLowerBound._neuron]; + _symbolicUb[j * _size + i] = + sourceSymbolicUb[j * sourceLayerSize + indexOfMaxLowerBound._neuron]; + } + _symbolicLowerBias[i] = + sourceLayer->getSymbolicLowerBias()[indexOfMaxLowerBound._neuron]; + _symbolicUpperBias[i] = + sourceLayer->getSymbolicUpperBias()[indexOfMaxLowerBound._neuron]; + + _symbolicLbOfLb[i] = maxLowerBound; _symbolicUbOfLb[i] = maxLowerBound; _symbolicLbOfUb[i] = sourceUbs[indexOfMaxLowerBound]; @@ -2617,19 +2619,20 @@ void Layer::computeSymbolicBoundsForMax() // Concrete bounds: lb_b <= x_f <= maxUpperBound for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - _symbolicLb[j * _size + i] = - sourceSymbolicLb[j * sourceLayerSize + indexOfMaxLowerBound._neuron]; - _symbolicUb[j * _size + i] = 0; + _symbolicLb[j * _size + i] = + sourceSymbolicLb[j * sourceLayerSize + indexOfMaxLowerBound._neuron]; + _symbolicUb[j * _size + i] = 0; } - _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[indexOfMaxLowerBound._neuron]; + _symbolicLowerBias[i] = + sourceLayer->getSymbolicLowerBias()[indexOfMaxLowerBound._neuron]; _symbolicUpperBias[i] = maxUpperBound; - + _symbolicLbOfLb[i] = maxLowerBound; _symbolicUbOfLb[i] = maxLowerBound; _symbolicLbOfUb[i] = maxUpperBound; _symbolicUbOfUb[i] = maxUpperBound; } - + /* We now have the tightest bounds we can for the relu variable. 
If they are tighter than what was previously @@ -2655,7 +2658,7 @@ void Layer::computeSymbolicBoundsForSoftmax() { std::fill_n( _symbolicLowerBias, _size, 0 ); std::fill_n( _symbolicUpperBias, _size, 0 ); - + double *symbolicLb = new double[_size * _size]; double *symbolicUb = new double[_size * _size]; std::fill_n( symbolicLb, _size * _size, 0 ); @@ -2663,7 +2666,7 @@ void Layer::computeSymbolicBoundsForSoftmax() double *_work = new double[_size * _size]; std::fill_n( _work, _size * _size, 0 ); - + Set handledInputNeurons; unsigned sourceLayerSize = _size; SoftmaxBoundType boundType = Options::get()->getSoftmaxBoundType(); @@ -2690,10 +2693,10 @@ void Layer::computeSymbolicBoundsForSoftmax() ASSERT( _neuronToActivationSources.exists( i ) ); List sources = getActivationSources( i ); const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - + sourceLayerSize = sourceLayer->getSize(); ASSERT( sourceLayerSize == _size ); - + Vector sourceLbs; Vector sourceUbs; Vector sourceMids; @@ -2705,13 +2708,13 @@ void Layer::computeSymbolicBoundsForSoftmax() unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); - + sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); sourceMids.append( ( sourceLb + sourceUb ) / 2 ); targetLbs.append( _lb[i] ); targetUbs.append( _ub[i] ); - + ++len; } @@ -2773,8 +2776,8 @@ softmaxLSELowerBound( sourceMids, sourceLbs, sourceUbs, index ); for ( const auto &sourceIndex : sources ) { - double dldj = - softmaxdLSELowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + double dldj = softmaxdLSELowerBound( + sourceMids, sourceLbs, sourceUbs, index, inputIndex ); symbolicLb[len * sourceIndex._neuron + i] = dldj; _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; ++inputIndex; @@ -2786,20 +2789,21 @@ void Layer::computeSymbolicBoundsForSoftmax() { softmaxLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index ); for ( const auto &sourceIndex : sources ) { - double dldj = - softmaxdLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + double dldj = softmaxdLSELowerBound2( + sourceMids, sourceLbs, sourceUbs, index, inputIndex ); symbolicLb[len * sourceIndex._neuron + i] = dldj; _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; ++inputIndex; } } - _symbolicUpperBias[i] = softmaxLSEUpperBound( sourceMids, targetLbs, targetUbs, index ); + _symbolicUpperBias[i] = + softmaxLSEUpperBound( sourceMids, targetLbs, targetUbs, index ); inputIndex = 0; for ( const auto &sourceIndex : sources ) { - double dudj = - softmaxdLSEUpperbound( sourceMids, targetLbs, targetUbs, index, inputIndex ); + double dudj = softmaxdLSEUpperbound( + sourceMids, targetLbs, targetUbs, index, inputIndex ); symbolicUb[len * sourceIndex._neuron + i] = dudj; _symbolicUpperBias[i] -= dudj * sourceMids[inputIndex]; ++inputIndex; @@ -2807,7 +2811,8 @@ void Layer::computeSymbolicBoundsForSoftmax() } else if ( boundType == SoftmaxBoundType::EXPONENTIAL_RECIPROCAL_DECOMPOSITION ) { - _symbolicLowerBias[i] = softmaxERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); + _symbolicLowerBias[i] = + softmaxERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); unsigned inputIndex = 0; for ( const auto &sourceIndex : sources ) { @@ -2818,7 +2823,8 @@
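Both LSE variants above produce tangent-plane bounds: each symbolic bound has the form f(mid) + sum_j (df/dx_j)(mid) * (x_j - mid_j), which is why the loops store the partial derivative as the coefficient and subtract dldj * sourceMids[inputIndex] from the bias. A generic first-order sketch of the ingredients (not the exact LSE formulas used by the patch):

#include <cmath>
#include <vector>

// Sketch: softmax_i and its Jacobian entries at a point; a tangent plane
// at the interval midpoints is  s + sum_j d_j * ( x_j - mid_j ), i.e.
// coefficients d_j and bias  s - sum_j d_j * mid_j.
double softmaxAt( const std::vector<double> &x, unsigned i )
{
    double sum = 0;
    for ( double xj : x )
        sum += std::exp( xj );
    return std::exp( x[i] ) / sum;
}

double softmaxPartial( const std::vector<double> &x, unsigned i, unsigned j )
{
    double si = softmaxAt( x, i );
    return ( i == j ) ? si * ( 1 - si ) : -si * softmaxAt( x, j );
}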
++inputIndex; } - _symbolicUpperBias[i] = softmaxERUpperBound( sourceMids, targetLbs, targetUbs, index ); + _symbolicUpperBias[i] = + softmaxERUpperBound( sourceMids, targetLbs, targetUbs, index ); inputIndex = 0; for ( const auto &sourceIndex : sources ) { @@ -2831,7 +2837,7 @@ void Layer::computeSymbolicBoundsForSoftmax() } } } - + for ( const auto &sourceLayerEntry : _sourceLayers ) { const Layer *sourceLayer = _layerOwner->getLayer( sourceLayerEntry.first ); @@ -2858,9 +2864,13 @@ void Layer::computeSymbolicBoundsForSoftmax() sourceLayerSize, _size ); if ( sourceLayer->getSymbolicLowerBias() ) - matrixMultiplication( - sourceLayer->getSymbolicLowerBias(), _work, _symbolicLowerBias, 1, sourceLayerSize, _size ); - + matrixMultiplication( sourceLayer->getSymbolicLowerBias(), + _work, + _symbolicLowerBias, + 1, + sourceLayerSize, + _size ); + for ( unsigned i = 0; i < sourceLayerSize * _size; ++i ) { if ( symbolicLb[i] < 0 ) @@ -2876,9 +2886,13 @@ void Layer::computeSymbolicBoundsForSoftmax() sourceLayerSize, _size ); if ( sourceLayer->getSymbolicLowerBias() ) - matrixMultiplication( - sourceLayer->getSymbolicUpperBias(), _work, _symbolicLowerBias, 1, sourceLayerSize, _size ); - + matrixMultiplication( sourceLayer->getSymbolicUpperBias(), + _work, + _symbolicLowerBias, + 1, + sourceLayerSize, + _size ); + for ( unsigned i = 0; i < sourceLayerSize * _size; ++i ) { if ( symbolicUb[i] > 0 ) @@ -2894,9 +2908,13 @@ void Layer::computeSymbolicBoundsForSoftmax() sourceLayerSize, _size ); if ( sourceLayer->getSymbolicUpperBias() ) - matrixMultiplication( - sourceLayer->getSymbolicUpperBias(), _work, _symbolicUpperBias, 1, sourceLayerSize, _size ); - + matrixMultiplication( sourceLayer->getSymbolicUpperBias(), + _work, + _symbolicUpperBias, + 1, + sourceLayerSize, + _size ); + for ( unsigned i = 0; i < sourceLayerSize * _size; ++i ) { if ( symbolicUb[i] < 0 ) @@ -2912,10 +2930,14 @@ void Layer::computeSymbolicBoundsForSoftmax() sourceLayerSize, _size ); if ( sourceLayer->getSymbolicUpperBias() ) - matrixMultiplication( - sourceLayer->getSymbolicLowerBias(), _work, _symbolicUpperBias, 1, sourceLayerSize, _size ); + matrixMultiplication( sourceLayer->getSymbolicLowerBias(), + _work, + _symbolicUpperBias, + 1, + sourceLayerSize, + _size ); } - + /* We now have the symbolic representation for the current layer. 
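The four matrixMultiplication calls above implement standard back-substitution through the preceding layer: a coefficient keeps a lower bound sound when paired with the source layer's lower symbolic row if it is positive, and with the upper symbolic row if it is negative (and symmetrically for upper bounds). The scalar form of that rule, as a sketch with illustrative names:

// Sketch: compose a lower bound  y >= sum_k c[k] * x_k + b  with symbolic
// bias terms  xLoBias[k], xHiBias[k]  of the previous layer's bounds on x_k.
// Positive coefficients take the lower row, negative ones the upper row.
double composeLowerBias( const double *c,
                         const double *xLoBias,
                         const double *xHiBias,
                         unsigned n,
                         double b )
{
    double bias = b;
    for ( unsigned k = 0; k < n; ++k )
        bias += ( c[k] >= 0 ) ? c[k] * xLoBias[k] : c[k] * xHiBias[k];
    return bias;
}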
Next, we compute new lower and upper bounds for @@ -2964,7 +2986,7 @@ void Layer::computeSymbolicBoundsForSoftmax() } } } - + if ( symbolicLb ) { delete[] symbolicLb; @@ -2974,7 +2996,7 @@ { delete[] symbolicUb; symbolicUb = NULL; - } + } if ( _work ) { delete[] _work; @@ -3009,13 +3031,13 @@ ASSERT( _neuronToActivationSources.exists( i ) ); List sources = getActivationSources( i ); ASSERT( sources.size() == 2 ); - + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - + unsigned sourceLayerSize = sourceLayer->getSize(); const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); - + Vector sourceLbs; Vector sourceUbs; Vector sourceValues; @@ -3028,10 +3050,10 @@ unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); - - sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); - sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); - + + sourceLbs.append( sourceLb ); + sourceUbs.append( sourceUb ); + if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) { allConstant = false; @@ -3041,7 +3063,7 @@ double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); sourceValues.append( sourceValue ); } - + if ( counter == 0 ) { indexA = sourceIndex._neuron; @@ -3052,7 +3074,7 @@ } ++counter; } - + if ( allConstant ) { // If both source neurons have been eliminated, this neuron is constant for ( unsigned j = 0; j < _inputLayerSize; ++j ) { _symbolicUb[j * _size + i] = 0; _symbolicLb[j * _size + i] = 0; } - + _symbolicUpperBias[i] = sourceValues[0] * sourceValues[1]; _symbolicLowerBias[i] = sourceValues[0] * sourceValues[1]; continue; } - + for ( unsigned j = 0; j < _inputLayerSize; ++j ) { _symbolicUb[j * _size + i] = 0; _symbolicLb[j * _size + i] = 0; } - + // Symbolic lower bound: // out >= alpha * x + beta * y + gamma // where alpha = lb_y, beta = lb_x, gamma = -lb_x * lb_y - + // Symbolic upper bound: // out <= alpha * x + beta * y + gamma // where alpha = ub_y, beta = lb_x, gamma = -lb_x * ub_y for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - if (sourceLbs[1] >= 0) - { - _symbolicLb[j * _size + i] += sourceLbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA]; - } - else - { - _symbolicLb[j * _size + i] += sourceLbs[1] * sourceSymbolicUb[j * sourceLayerSize + indexA]; - } - - if (sourceUbs[1] >= 0) - { - _symbolicUb[j * _size + i] += sourceUbs[1] * sourceSymbolicUb[j * sourceLayerSize + indexA]; - } - else - { - _symbolicLb[j * _size + i] += sourceUbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA]; - } - - if (sourceLbs[0] >= 0) - { - _symbolicLb[j * _size + i] += sourceLbs[0] * sourceSymbolicLb[j * sourceLayerSize + indexB]; - _symbolicUb[j * _size + i] += sourceLbs[0] * sourceSymbolicUb[j * sourceLayerSize + indexB]; - } - else - { - _symbolicLb[j * _size + i] += sourceLbs[0] * sourceSymbolicUb[j * sourceLayerSize + indexB]; - _symbolicUb[j * _size + i] += sourceLbs[0] * sourceSymbolicLb[j * sourceLayerSize + indexB]; - } + if ( sourceLbs[1] >= 0 ) + { + _symbolicLb[j * _size + i] += + sourceLbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA]; + } + else + {
_symbolicLb[j * _size + i] += + sourceLbs[1] * sourceSymbolicUb[j * sourceLayerSize + indexA]; + } + + if ( sourceUbs[1] >= 0 ) + { + _symbolicUb[j * _size + i] += + sourceUbs[1] * sourceSymbolicUb[j * sourceLayerSize + indexA]; + } + else + { + _symbolicLb[j * _size + i] += + sourceUbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA]; + } + + if ( sourceLbs[0] >= 0 ) + { + _symbolicLb[j * _size + i] += + sourceLbs[0] * sourceSymbolicLb[j * sourceLayerSize + indexB]; + _symbolicUb[j * _size + i] += + sourceLbs[0] * sourceSymbolicUb[j * sourceLayerSize + indexB]; + } + else + { + _symbolicLb[j * _size + i] += + sourceLbs[0] * sourceSymbolicUb[j * sourceLayerSize + indexB]; + _symbolicUb[j * _size + i] += + sourceLbs[0] * sourceSymbolicLb[j * sourceLayerSize + indexB]; + } } _symbolicLowerBias[i] = -sourceLbs[0] * sourceLbs[1]; _symbolicUpperBias[i] = -sourceLbs[0] * sourceUbs[1]; - + double lb = FloatUtils::infinity(); double ub = FloatUtils::negativeInfinity(); List values = { sourceLbs[0] * sourceLbs[1], @@ -3127,7 +3157,7 @@ void Layer::computeSymbolicBoundsForBilinear() if ( v > ub ) ub = v; } - + /* We now have the symbolic representation for the current layer. Next, we compute new lower and upper bounds for @@ -3143,9 +3173,9 @@ { double inputLb = _layerOwner->getLayer( 0 )->getLb( j ); double inputUb = _layerOwner->getLayer( 0 )->getUb( j ); - + double entry = _symbolicLb[j * _size + i]; - + if ( entry >= 0 ) { _symbolicLbOfLb[i] += ( entry * inputLb ); @@ -3170,7 +3200,7 @@ _symbolicUbOfUb[i] += ( entry * inputLb ); } } - + /* We now have the tightest bounds we can for the relu variable. If they are tighter than what was previously @@ -3366,16 +3396,17 @@ double Layer::calculateDifferenceFromSymbolic( Map &point, { double lower_sum = _symbolicLowerBias[i]; double upper_sum = _symbolicUpperBias[i]; - + for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - const NeuronIndex neuron( 0, j ); - lower_sum += _symbolicLb[j * _size + i] * point[neuron]; - upper_sum += _symbolicUb[j * _size + i] * point[neuron]; + const NeuronIndex neuron( 0, j ); + lower_sum += _symbolicLb[j * _size + i] * point[neuron]; + upper_sum += _symbolicUb[j * _size + i] * point[neuron]; } - + const NeuronIndex currentNeuron( _layerIndex, i ); - return FloatUtils::max( point[currentNeuron] - upper_sum , 0 ) + FloatUtils::max( lower_sum - point[currentNeuron] , 0 ); + return FloatUtils::max( point[currentNeuron] - upper_sum, 0 ) + + FloatUtils::max( lower_sum - point[currentNeuron], 0 ); } void Layer::computeParameterisedSymbolicBounds( double coeff, bool receive ) @@ -3389,34 +3420,34 @@ void Layer::computeParameterisedSymbolicBounds( double coeff, bool receive ) case SIGN: computeParameterisedSymbolicBoundsForSign( coeff ); break; - + case LEAKY_RELU: computeParameterisedSymbolicBoundsForLeakyRelu( coeff ); break; - + default: computeSymbolicBounds(); break; } - + if ( receive ) { for ( unsigned i = 0; i < _size; ++i ) { - Map lower_polygonal_tightening; - Map upper_polygonal_tightening; - + Map lower_polygonal_tightening; + Map upper_polygonal_tightening; + for ( unsigned j = 0; j < _inputLayerSize; ++j ) { const NeuronIndex neuron( 0, j ); lower_polygonal_tightening.insert( neuron, _symbolicLb[j * _size + i] ); upper_polygonal_tightening.insert( neuron, _symbolicUb[j * _size + i] ); } - - _layerOwner->receivePolygonalTighterBound( - PolygonalTightening( lower_polygonal_tightening, _symbolicLowerBias[i],
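The bilinear coefficients used above are the McCormick inequalities for z = x * y over a box: z >= ly*x + lx*y - lx*ly and z <= uy*x + lx*y - lx*uy, and the concrete range is always attained at a corner of the box, which is what the four-value list computes. A self-contained corner-evaluation sketch:

#include <algorithm>

// Sketch: concrete range of z = x * y for x in [lx, ux], y in [ly, uy].
// The product is bilinear, so its extrema lie at the four box corners.
void bilinearRange( double lx, double ux, double ly, double uy, double &lo, double &hi )
{
    double corners[4] = { lx * ly, lx * uy, ux * ly, ux * uy };
    lo = *std::min_element( corners, corners + 4 );
    hi = *std::max_element( corners, corners + 4 );
}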
PolygonalTightening::LB ) ); - _layerOwner->receivePolygonalTighterBound( - PolygonalTightening( upper_polygonal_tightening, _symbolicUpperBias[i], PolygonalTightening::UB ) ); + + _layerOwner->receivePolygonalTighterBound( PolygonalTightening( + lower_polygonal_tightening, _symbolicLowerBias[i], PolygonalTightening::LB ) ); + _layerOwner->receivePolygonalTighterBound( PolygonalTightening( + upper_polygonal_tightening, _symbolicUpperBias[i], PolygonalTightening::UB ) ); } } } @@ -3528,19 +3559,19 @@ void Layer::computeParameterisedSymbolicBoundsForRelu( double coeff ) { for ( unsigned j = 0; j < _inputLayerSize; ++j ) _symbolicLb[j * _size + i] *= coeff; - + _symbolicLowerBias[i] *= coeff; } else - { + { for ( unsigned j = 0; j < _inputLayerSize; ++j ) _symbolicLb[j * _size + i] = _symbolicLb[j * _size + i] * _symbolicUbOfLb[i] / ( _symbolicUbOfLb[i] - _symbolicLbOfLb[i] ); - + _symbolicLowerBias[i] = _symbolicLowerBias[i] * _symbolicUbOfLb[i] / ( _symbolicUbOfLb[i] - _symbolicLbOfLb[i] ); } - + _symbolicLbOfLb[i] = 0; } else @@ -3673,7 +3704,7 @@ void Layer::computeParameterisedSymbolicBoundsForSign( double coeff ) { PhaseStatus upperSignPhase = PHASE_NOT_FIXED; PhaseStatus lowerSignPhase = PHASE_NOT_FIXED; - + // If we got here, we know that lbLb < 0 and ubUb // > 0 @@ -3687,7 +3718,7 @@ void Layer::computeParameterisedSymbolicBoundsForSign( double coeff ) _symbolicUb[j * _size + i] = 0; _symbolicUpperBias[i] = 1; - + upperSignPhase = SIGN_PHASE_POSITIVE; } else @@ -3734,8 +3765,8 @@ void Layer::computeParameterisedSymbolicBoundsForSign( double coeff ) _symbolicLowerBias[i] *= factor; _symbolicLowerBias[i] -= 1; } - - if (upperSignPhase == PHASE_NOT_FIXED) + + if ( upperSignPhase == PHASE_NOT_FIXED ) { _symbolicUbOfUb[i] = 1; _symbolicLbOfUb[i] = -1; @@ -3745,8 +3776,8 @@ void Layer::computeParameterisedSymbolicBoundsForSign( double coeff ) _symbolicUbOfUb[i] = 1; _symbolicLbOfUb[i] = 1; } - - if (lowerSignPhase == PHASE_NOT_FIXED) + + if ( lowerSignPhase == PHASE_NOT_FIXED ) { _symbolicUbOfLb[i] = 1; _symbolicLbOfLb[i] = -1; @@ -3807,7 +3838,7 @@ void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( double coeff ) { ASSERT( _alpha > 0 && _alpha < 1 ); ASSERT( coeff >= 0 && coeff <= 1 ); - + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); @@ -3898,11 +3929,11 @@ void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( double coeff ) { _symbolicUb[j * _size + i] *= weight; } - + // Do the same for the bias, and then adjust _symbolicUpperBias[i] *= weight; _symbolicUpperBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; - + // For the lower bound, in general, x_f >= lambda * x_b, where // 0 <= lambda <= 1, would be a sound lower bound. We @@ -3914,20 +3945,20 @@ void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( double coeff ) // lambda = 1 // Symbolic lower bound: x_f >= x_b // Concrete lower bound: x_f >= sourceLb - + // Lower bounds are passed as is } else { - // lambda = ((1 - alpha) * coeff + alpha) (varies continuously between lambda = alpha and lambda = 1). - // Symbolic lower bound: x_f >= ((1 - coeff) * weight + alpha) x_b - // Concrete lower bound: x_f >= 0 - + // lambda = ((1 - alpha) * coeff + alpha) (varies continuously between lambda = + // alpha and lambda = 1). 
Symbolic lower bound: x_f >= ((1 - alpha) * coeff + + // alpha) x_b Concrete lower bound: x_f >= 0 + for ( unsigned j = 0; j < _inputLayerSize; ++j ) { _symbolicLb[j * _size + i] *= ( 1 - _alpha ) * coeff + _alpha; } - + _symbolicLowerBias[i] *= _alpha; } } @@ -3937,7 +3968,7 @@ void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( double coeff ) { _symbolicLb[j * _size + i] *= weight; } - + // Do the same for the bias, and then adjust _symbolicLowerBias[i] *= weight; _symbolicLowerBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; @@ -3952,11 +3983,11 @@ { _symbolicUb[j * _size + i] *= _alpha; } - + _symbolicUpperBias[i] *= _alpha; } } - + /* We now have the symbolic representation for the current layer. Next, we compute new lower and upper bounds for @@ -3972,9 +4003,9 @@ { double inputLb = _layerOwner->getLayer( 0 )->getLb( j ); double inputUb = _layerOwner->getLayer( 0 )->getUb( j ); - + double entry = _symbolicLb[j * _size + i]; - + if ( entry >= 0 ) { _symbolicLbOfLb[i] += ( entry * inputLb ); @@ -3985,9 +4016,9 @@ _symbolicLbOfLb[i] += ( entry * inputUb ); _symbolicUbOfLb[i] += ( entry * inputLb ); } - + entry = _symbolicUb[j * _size + i]; - + if ( entry >= 0 ) { _symbolicLbOfUb[i] += ( entry * inputLb ); @@ -4025,7 +4056,7 @@ _symbolicUpperBias[i] *= _alpha; } } - + if ( _symbolicUbOfUb[i] > sourceUb ) _symbolicUbOfUb[i] = sourceUb; if ( _symbolicLbOfLb[i] < _alpha * sourceLb ) _symbolicLbOfLb[i] = _alpha * sourceLb; @@ -4053,9 +4084,9 @@ } double Layer::softmaxLSELowerBound( const Vector &inputs, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) { double sum = 0; for ( unsigned j = 0; j < inputs.size(); ++j ) @@ -4071,10 +4102,10 @@ double Layer::softmaxLSELowerBound( const Vector &inputs, } double Layer::softmaxdLSELowerBound( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) { double val = 0; if ( i == di ) @@ -4101,10 +4132,10 @@ double Layer::softmaxdLSELowerBound( const Vector &inputMids, } double Layer::softmaxLSELowerBound2( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) { double max = FloatUtils::negativeInfinity(); unsigned maxInputIndex = 0; @@ -4143,10 +4174,10 @@ double Layer::softmaxLSELowerBound2( const Vector &inputMids, } double Layer::softmaxdLSELowerBound2( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) { double max = FloatUtils::negativeInfinity(); unsigned maxInputIndex = 0; @@ -4217,9 +4248,9 @@ double Layer::softmaxdLSELowerBound2( const Vector &inputMids, } double Layer::softmaxLSEUpperBound( const Vector &inputs, - const Vector &outputLb, - const Vector &outputUb, - unsigned i ) + const Vector &outputLb, + const Vector &outputUb, + unsigned i ) { double li = outputLb[i]; double ui = outputUb[i]; @@ -4233,10 +4264,10 @@ double
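The parameterisation above interpolates the unfixed-phase lower bound between slope alpha (coeff = 0) and slope 1 (coeff = 1); any slope lambda in [alpha, 1] with zero intercept is sound for leaky ReLU, since y = alpha*x >= lambda*x when x < 0 and y = x >= lambda*x when x >= 0. A one-line sketch of the slope choice (hypothetical helper):

// Sketch: parameterised lower-bound slope for leaky ReLU with negative
// slope alpha on an unfixed neuron ( lb < 0 < ub ), coeff in [0, 1].
double leakyReluLowerSlope( double alpha, double coeff )
{
    return ( 1 - alpha ) * coeff + alpha;
}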
Layer::softmaxLSEUpperBound( const Vector &inputs, } double Layer::softmaxdLSEUpperbound( const Vector &inputMids, - const Vector &outputLb, - const Vector &outputUb, - unsigned i, - unsigned di ) + const Vector &outputLb, + const Vector &outputUb, + unsigned i, + unsigned di ) { double li = outputLb[i]; double ui = outputUb[i]; @@ -4251,9 +4282,9 @@ double Layer::softmaxdLSEUpperbound( const Vector &inputMids, } double Layer::softmaxERLowerBound( const Vector &inputs, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) { Vector inputTilda; SoftmaxConstraint::xTilda( inputs, inputs[i], inputTilda ); @@ -4278,10 +4309,10 @@ double Layer::softmaxERLowerBound( const Vector &inputs, } double Layer::softmaxdERLowerBound( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) { double val = softmaxERLowerBound( inputMids, inputLbs, inputUbs, i ); @@ -4309,9 +4340,9 @@ double Layer::softmaxdERLowerBound( const Vector &inputMids, } double Layer::softmaxERUpperBound( const Vector &inputs, - const Vector &outputLb, - const Vector &outputUb, - unsigned i ) + const Vector &outputLb, + const Vector &outputUb, + unsigned i ) { double li = outputLb[i]; double ui = outputUb[i]; @@ -4323,10 +4354,10 @@ double Layer::softmaxERUpperBound( const Vector &inputs, } double Layer::softmaxdERUpperBound( const Vector &inputMids, - const Vector &outputLb, - const Vector &outputUb, - unsigned i, - unsigned di ) + const Vector &outputLb, + const Vector &outputUb, + unsigned i, + unsigned di ) { double li = outputLb[i]; double ui = outputUb[i]; @@ -4343,8 +4374,8 @@ double Layer::softmaxdERUpperBound( const Vector &inputMids, } double Layer::softmaxLinearLowerBound( const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) + const Vector &inputUbs, + unsigned i ) { Vector uTilda; SoftmaxConstraint::xTilda( inputUbs, inputLbs[i], uTilda ); @@ -4353,8 +4384,8 @@ double Layer::softmaxLinearLowerBound( const Vector &inputLbs, } double Layer::softmaxLinearUpperBound( const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) + const Vector &inputUbs, + unsigned i ) { Vector lTilda; SoftmaxConstraint::xTilda( inputLbs, inputUbs[i], lTilda ); diff --git a/src/nlr/Layer.h b/src/nlr/Layer.h index aa318e9181..ee9e1aba05 100644 --- a/src/nlr/Layer.h +++ b/src/nlr/Layer.h @@ -140,7 +140,7 @@ class Layer void computeParameterisedSymbolicBounds( double coeff, bool receive = false ); double calculateDifferenceFromSymbolic( Map &point, unsigned i ) const; void computeIntervalArithmeticBounds(); - + /* Preprocessing functionality: variable elimination and reindexing */ @@ -209,7 +209,7 @@ class Layer void allocateMemory(); void freeMemoryIfNeeded(); - + /* The following methods compute concrete softmax output bounds using different linear approximation, as well as the coefficients @@ -225,12 +225,12 @@ class Layer const Vector &inputUbs, unsigned i, unsigned di ); - + double softmaxLSELowerBound2( const Vector &inputMids, const Vector &inputLbs, const Vector &inputUbs, unsigned i ); - + double softmaxdLSELowerBound2( const Vector &inputMids, const Vector &inputLbs, const Vector &inputUbs, @@ -258,26 +258,26 @@ class Layer const Vector &inputUbs, unsigned i, unsigned di ); - + double softmaxERUpperBound( const Vector &inputs, const Vector &outputLb, const Vector &outputUb, unsigned i ); - + double 
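The ER ("exponential-reciprocal") family above works through shifted inputs (xTilda subtracts one coordinate), using the identity softmax_i(x) = 1 / sum_j exp(x_j - x_i); bounding each shifted coordinate by an interval immediately brackets the reciprocal sum, which is what softmaxLinearLowerBound and softmaxLinearUpperBound exploit at the interval corners. A sketch of the identity only (the derivative bounds in the patch go further):

#include <cmath>
#include <vector>

// Sketch: softmax_i rewritten as the reciprocal of a sum of shifted
// exponentials; monotone in each shifted coordinate x_j - x_i.
double softmaxViaReciprocal( const std::vector<double> &x, unsigned i )
{
    double sum = 0;
    for ( double xj : x )
        sum += std::exp( xj - x[i] );
    return 1.0 / sum;
}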
softmaxdERUpperBound( const Vector &inputMids, const Vector &outputLb, const Vector &outputUb, unsigned i, unsigned di ); - + double softmaxLinearLowerBound( const Vector &inputLbs, const Vector &inputUbs, unsigned i ); - + double softmaxLinearUpperBound( const Vector &inputLbs, const Vector &inputUbs, unsigned i ); - + /* Helper functions for symbolic bound tightening */ @@ -293,8 +293,8 @@ class Layer void computeSymbolicBoundsForSoftmax(); void computeSymbolicBoundsForBilinear(); void computeSymbolicBoundsDefault(); - - + + /* Helper functions for parameterised symbolic bound tightening */ diff --git a/src/nlr/LayerOwner.h b/src/nlr/LayerOwner.h index 122c6a216a..b8ae03cb0f 100644 --- a/src/nlr/LayerOwner.h +++ b/src/nlr/LayerOwner.h @@ -17,8 +17,8 @@ #define __LayerOwner_h__ #include "ITableau.h" -#include "Tightening.h" #include "PolygonalTightening.h" +#include "Tightening.h" namespace NLR { diff --git a/src/nlr/MILPSolverBoundTighteningType.h b/src/nlr/MILPSolverBoundTighteningType.h new file mode 100644 index 0000000000..760f82e8d1 --- /dev/null +++ b/src/nlr/MILPSolverBoundTighteningType.h @@ -0,0 +1,46 @@ +/********************* */ +/*! \file MILPSolverBoundTighteningType.h +** \verbatim +** Top contributors (to current version): +** Guy Katz +** This file is part of the Marabou project. +** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS +** in the top-level source directory) and their institutional affiliations. +** All rights reserved. See the file COPYING in the top-level source +** directory for licensing information.\endverbatim +** +** [[ Add lengthier description here ]] + +**/ + +#ifndef __MILPSolverBoundTighteningType_h__ +#define __MILPSolverBoundTighteningType_h__ + +/* + MILP solver bound tightening options +*/ +enum class MILPSolverBoundTighteningType { + // Only encode pure linear constraints in the underlying + // solver, in a way that over-approximates the query + LP_RELAXATION = 0, + LP_RELAXATION_INCREMENTAL = 1, + // Encode linear and integer constraints in the underlying + // solver, in a way that completely captures the query but is + // more expensive to solve + MILP_ENCODING = 2, + MILP_ENCODING_INCREMENTAL = 3, + // Encode full queries and try to fix ReLUs until a fixed point is reached + ITERATIVE_PROPAGATION = 4, + // Perform backward analysis + BACKWARD_ANALYSIS_ONCE = 5, + BACKWARD_ANALYSIS_CONVERGE = 6, + // Perform backward analysis using the INVPROP Algorithm (arXiv:2302.01404v4 [cs.LG]) + BACKWARD_ANALYSIS_INVPROP = 7, + // Perform backward analysis using the PreimageApproximation Algorithm (arXiv:2305.03686v4 + // [cs.SE]) + BACKWARD_ANALYSIS_PREIMAGE_APPROX = 8, + // Option to have no MILP bound tightening performed + NONE = 10, +}; + +#endif // __MILPSolverBoundTighteningType_h__ diff --git a/src/nlr/NetworkLevelReasoner.cpp b/src/nlr/NetworkLevelReasoner.cpp index d41ed22de4..2930770c6d 100644 --- a/src/nlr/NetworkLevelReasoner.cpp +++ b/src/nlr/NetworkLevelReasoner.cpp @@ -203,7 +203,8 @@ void NetworkLevelReasoner::receivePolygonalTighterBound( PolygonalTightening pol _polygonalBoundTightenings.append( polygonal_tightening ); } -void NetworkLevelReasoner::getConstraintPolygonalTightenings( List &polygonal_tightenings ) +void NetworkLevelReasoner::getConstraintPolygonalTightenings( + List &polygonal_tightenings ) { polygonal_tightenings = _polygonalBoundTightenings; _polygonalBoundTightenings.clear(); diff --git a/src/nlr/NetworkLevelReasoner.h b/src/nlr/NetworkLevelReasoner.h index f84f0f12ea..f6bf6cf72f 100644 ---
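A caller would typically branch on this enum when configuring a tightening pass; a minimal hypothetical dispatch (the case labels come from the header above, the function itself and its comments are illustrative, not part of the patch):

void configureBoundTightening( MILPSolverBoundTighteningType type )
{
    switch ( type )
    {
    case MILPSolverBoundTighteningType::LP_RELAXATION:
    case MILPSolverBoundTighteningType::LP_RELAXATION_INCREMENTAL:
        // Sound but loose: only the linear over-approximation is solved.
        break;

    case MILPSolverBoundTighteningType::MILP_ENCODING:
    case MILPSolverBoundTighteningType::MILP_ENCODING_INCREMENTAL:
        // Exact but expensive: integer variables capture the case splits.
        break;

    default:
        // Iterative propagation, the backward-analysis variants, or NONE.
        break;
    }
}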
a/src/nlr/NetworkLevelReasoner.h +++ b/src/nlr/NetworkLevelReasoner.h @@ -103,7 +103,7 @@ class NetworkLevelReasoner : public LayerOwner expressions and obtain tighter bounds (e.g., if the upper bound on the upper bound of a ReLU node is negative, that ReLU is inactive and its output can be set to 0). - + - Parameterised Symbolic: For certain activation functions, there is a continuum of valid symbolic bounds. We receive a map of coefficients in range [0, 1] for every layer index, then compute @@ -121,10 +121,10 @@ class NetworkLevelReasoner : public LayerOwner - getConstraintTightenings: this is the function that an external user calls in order to collect the tighter bounds discovered by the NLR. - + - receivePolygonalTighterBound: this is a callback from the layer objects, through which they report tighter polygonal bounds. - + - getConstraintPolygonalTightenings: this is the function that an external user calls in order to collect the tighter polygonal bounds discovered by the NLR. @@ -148,7 +148,7 @@ class NetworkLevelReasoner : public LayerOwner void receiveTighterBound( Tightening tightening ); void getConstraintTightenings( List &tightenings ); void clearConstraintTightenings(); - + void receivePolygonalTighterBound( PolygonalTightening polygonal_tightening ); void getConstraintPolygonalTightenings( List &polygonal_tightenings ); void clearConstraintPolygonalTightenings(); diff --git a/src/nlr/PolygonalTightening.h b/src/nlr/PolygonalTightening.h new file mode 100644 index 0000000000..0f811f6fe6 --- /dev/null +++ b/src/nlr/PolygonalTightening.h @@ -0,0 +1,100 @@ +/********************* */ +/*! \file PolygonalTightening.h + ** \verbatim + ** Top contributors (to current version): + ** Duligur Ibeling, Guy Katz, Ido Shmuel + ** This file is part of the Marabou project. + ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS + ** in the top-level source directory) and their institutional affiliations. + ** All rights reserved. See the file COPYING in the top-level source + ** directory for licensing information.\endverbatim + ** + ** [[ Add lengthier description here ]] + +**/ + +#ifndef __PolygonalTightening_h__ +#define __PolygonalTightening_h__ + +#include "Map.h" +#include "NeuronIndex.h" + +#include + +class PolygonalTightening +{ +public: + enum PolygonalBoundType { + LB = 0, + UB = 1, + }; + + PolygonalTightening( Map neuronToCoefficient, + double value, + PolygonalBoundType type ) + : _neuronToCoefficient( neuronToCoefficient ) + , _value( value ) + , _type( type ) + { + } + + /* + The coefficient of each neuron. + */ + Map _neuronToCoefficient; + + /* + Its new value. + */ + double _value; + + /* + Whether the tightening tightens the + lower bound or the upper bound. + */ + PolygonalBoundType _type; + + /* + Equality operator. + */ + bool operator==( const PolygonalTightening &other ) const + { + bool allFound = true; + for ( const auto &pair : _neuronToCoefficient ) + { + bool currentFound = false; + for ( const auto &otherPair : other._neuronToCoefficient ) + { + currentFound |= ( pair.first._layer == otherPair.first._layer && + pair.first._neuron == otherPair.first._neuron && + pair.second == otherPair.second ); + } + allFound &= currentFound; + } + bool result = allFound && _value == other._value && _type == other._type; + return result; + } + + void dump() const + { + printf( "PolygonalTightening: x %s %.2lf\n", _type == LB ?
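For illustration, a tightening expressing 2*x_(0,0) - x_(1,1) >= 0.5 would be built roughly as below (usage sketch only; it assumes Map's insert( key, value ) interface and the NLR::NeuronIndex constructor used elsewhere in this patch):

Map<NLR::NeuronIndex, double> coefficients;
coefficients.insert( NLR::NeuronIndex( 0, 0 ), 2.0 );
coefficients.insert( NLR::NeuronIndex( 1, 1 ), -1.0 );

PolygonalTightening pt( coefficients, 0.5, PolygonalTightening::LB );
pt.dump(); // prints the bound direction, value, and per-neuron coefficients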
">=" : "<=", _value ); + + for ( const auto &pair : _neuronToCoefficient ) + { + printf( "NeuronIndex: (layer %u, neuron %u), Coefficient: %.2lf )\n", + pair.first._layer, + pair.first._neuron, + pair.second ); + } + } +}; + +#endif // __PolygonalTightening_h__ + +// +// Local Variables: +// compile-command: "make -C ../.. " +// tags-file-name: "../../TAGS" +// c-basic-offset: 4 +// End: +// s diff --git a/src/nlr/Test_NetworkLevelReasoner.h b/src/nlr/Test_NetworkLevelReasoner.h new file mode 100644 index 0000000000..977faa4ed6 --- /dev/null +++ b/src/nlr/Test_NetworkLevelReasoner.h @@ -0,0 +1,12348 @@ +/********************* */ +/*! \file Test_NetworkLevelReasoner.h + ** \verbatim + ** Top contributors (to current version): + ** Guy Katz, Andrew Wu + ** This file is part of the Marabou project. + ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS + ** in the top-level source directory) and their institutional affiliations. + ** All rights reserved. See the file COPYING in the top-level source + ** directory for licensing information.\endverbatim + ** + ** [[ Add lengthier description here ]] + +**/ + +#include "../../engine/tests/MockTableau.h" // TODO: fix this +#include "DeepPolySoftmaxElement.h" +#include "FloatUtils.h" +#include "Layer.h" +#include "NetworkLevelReasoner.h" +#include "Options.h" +#include "Query.h" +#include "Tightening.h" +#include "Vector.h" + +#include + +class MockForNetworkLevelReasoner +{ +public: +}; + +class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite +{ +public: + MockForNetworkLevelReasoner *mock; + + void setUp() + { + TS_ASSERT( mock = new MockForNetworkLevelReasoner ); + } + + void tearDown() + { + TS_ASSERT_THROWS_NOTHING( delete mock ); + } + + void populateNetwork( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( 
NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithSigmoids( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SIGMOID, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Sigmoid sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithAbs( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1
); + nlr.setBias( 3, 1, 2 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithSign( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithRound( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ROUND, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 
2 ); + nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Round sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithLeakyRelu( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + nlr.getLayer( 4 )->setAlpha( 0.1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the LeakyReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 
), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + + void populateNetworkWithMax( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x b e + g + y c f + d + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::MAX, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::MAX, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, -2 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 2 ); + nlr.setWeight( 0, 1, 1, 3, -3 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 0, 2 ); + + // Mark the Max sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 3, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + } + + void populateNetworkWithSoftmax( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SOFTMAX, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 
4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Softmax sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 0, 4, 1 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithBilinear( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x b e + g + y c f + d + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::BILINEAR, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, -2 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 2 ); + nlr.setWeight( 0, 1, 1, 3, -3 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 0, 2 ); + + // Mark the Bilinear sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 3, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + } + + void populateNetworkWithAbsAndRelu( NLR::NetworkLevelReasoner 
&nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -5 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Abs/ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithRoundAndSign( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ROUND, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Round/Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable(
NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithLeakyReluAndSigmoid( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the LeakyReLU/Sigmoid sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d + b f + y e + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::MAX, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 
0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Softmax/Max sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + } + + void populateNetworkWithReluAndBilinear( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d + b f + y e + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the ReLU/Bilinear sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + 
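// Worked example ( editor's sketch, not part of the original test ): with
+ // inputs ( x0, x1 ) = ( 1, 1 ), layer 1 computes ( 2, -1, 1 ), the ReLU
+ // layer yields ( 2, 0, 1 ), layer 3 computes ( 1, -1 ), the bilinear
+ // neuron outputs 1 * ( -1 ) = -1, and the output layer gives -1 * ( -1 ) = 1.
+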
nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + } + + void populateNetworkSBTRelu( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + 2 R 1 + x0 --- x2 ---> x4 --- x6 + \ / / + 1 \ / / + \/ -1 / + /\ / + 3 / \ / + / \ R / + x1 --- x3 ---> x5 + 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 7 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + } + + void populateNetworkSBTReluResidual1( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + -1 + __________________ + / \ + / 1 R -1 1 R 3 1 + x0 --- x1 ---> x2 --- x3 ---> x4 --- x5 + \ / + \ 3 / + \________________________/ + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 1 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 2, NLR::Layer::RELU, 1 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 4, NLR::Layer::RELU, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + nlr.addLayerDependency( 0, 3 ); + nlr.addLayerDependency( 1, 5 ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 4, 0, 5, 0, 3 ); + nlr.setWeight( 0, 0, 3, 0, -1 ); + nlr.setWeight( 1, 0, 5, 0, 3 ); + + + nlr.setBias( 3, 0, 1 ); + nlr.setBias( 5, 0, 1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 5 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 6 ); + tableau.setLowerBound( 1, -large ); + tableau.setUpperBound( 1, large ); + 
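// Worked example ( editor's sketch, not part of the original test ): for
+ // x0 = 1 the residual links give x1 = 1, x2 = ReLU( 1 ) = 1,
+ // x3 = -x2 - x0 + 1 = -1, x4 = ReLU( -1 ) = 0, and
+ // x5 = 3*x4 + 3*x1 + 1 = 4.
+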
tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + } + + void populateNetworkSBTReluResidual2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + -1 + __________________ + / \ + / 1 R -1 1 R 3 1 1 + x0 --- x1 ---> x2 --- x3 ---> x4 --- x5 --- x6 + \ / + \ 1 / + \_______________________________/ + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 1 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 2, NLR::Layer::RELU, 1 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 4, NLR::Layer::RELU, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 6, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 6; ++i ) + nlr.addLayerDependency( i - 1, i ); + nlr.addLayerDependency( 0, 3 ); + nlr.addLayerDependency( 0, 5 ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 4, 0, 5, 0, 3 ); + nlr.setWeight( 0, 0, 3, 0, -1 ); + nlr.setWeight( 0, 0, 5, 0, 1 ); + nlr.setWeight( 5, 0, 6, 0, 1 ); + + nlr.setBias( 3, 0, 1 ); + nlr.setBias( 5, 0, 1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 7 ); + tableau.setLowerBound( 1, -large ); + tableau.setUpperBound( 1, large ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + } + + void populateNetworkSBTReluReindex( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 1 1 1 + x0 --- x2 x5 --- x6 x9 --- x10 + \ /\ /\ / \ / \ / + 1 \ / R\ /-1\ / R \ / 1 \ / + \/ \/ \/ \/ \/ + /\ /\ /\ /\ /\ + 1 / \ R/ \ 1/ \ R / \ 1 / \ + / \/ \/ \ / \ / 0 \ + x1 --- x3 x4 --- x7 x8 --- x11 + -1 1 + + The example described in Fig. 
3 of + https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 0 ); + + nlr.setBias( 5, 0, 1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + + nlr.addActivationSource( 3, 0, 4, 1 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 8 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkSBTLeakyReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 LR 1 LR 1 1 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 1 \ / 0 \ / + \/ \/ \/ + /\ /\ /\ + 1 / \ 1 / \ 1 / \ + / \ LR / \ LR / 1 \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + -1 -1 + + The example described in Fig. 
3 of + https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + using LeakyReLU activation instead of ReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + nlr.getLayer( 2 )->setAlpha( 0.2 ); + nlr.getLayer( 4 )->setAlpha( 0.2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, 1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 0 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + + nlr.setBias( 5, 0, 1 ); + + // Mark the LeakyReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkSBTSigmoidsAndRound( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 S 1 Rd + x0 --- x2 ---> x4 --- x6 --- x8 + \ / \ / + 1 \ / 1 \ / + \/ \/ + /\ /\ + 1 / \ 1 / \ + / \ S / \ Rd + x1 --- x3 ---> x5 --- x7 --- x9 + -1 -1 + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 4; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // 
Set the weights and biases for the weighted sum layers
+ nlr.setWeight( 0, 0, 1, 0, 1 );
+ nlr.setWeight( 0, 0, 1, 1, 1 );
+ nlr.setWeight( 0, 1, 1, 0, 1 );
+ nlr.setWeight( 0, 1, 1, 1, -1 );
+
+ nlr.setWeight( 2, 0, 3, 0, 1 );
+ nlr.setWeight( 2, 0, 3, 1, 1 );
+ nlr.setWeight( 2, 1, 3, 0, 1 );
+ nlr.setWeight( 2, 1, 3, 1, -1 );
+
+ // Mark the Sigmoid/Round sources
+ nlr.addActivationSource( 1, 0, 2, 0 );
+ nlr.addActivationSource( 1, 1, 2, 1 );
+ nlr.addActivationSource( 3, 0, 4, 0 );
+ nlr.addActivationSource( 3, 1, 4, 1 );
+
+ // Variable indexing
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 );
+
+ // Very loose bounds for neurons except inputs
+ double large = 1000000;
+
+ tableau.getBoundManager().initialize( 10 );
+ tableau.setLowerBound( 2, -large );
+ tableau.setUpperBound( 2, large );
+ tableau.setLowerBound( 3, -large );
+ tableau.setUpperBound( 3, large );
+ tableau.setLowerBound( 4, -large );
+ tableau.setUpperBound( 4, large );
+ tableau.setLowerBound( 5, -large );
+ tableau.setUpperBound( 5, large );
+ tableau.setLowerBound( 6, -large );
+ tableau.setUpperBound( 6, large );
+ tableau.setLowerBound( 7, -large );
+ tableau.setUpperBound( 7, large );
+ tableau.setLowerBound( 8, -large );
+ tableau.setUpperBound( 8, large );
+ tableau.setLowerBound( 9, -large );
+ tableau.setUpperBound( 9, large );
+ }
+
+ void populateNetworkSBTMax( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
+ {
+ /*
+
+ 1 R Max 2
+ x0 --- x2 ---> x4 --- x6 ---> x7
+ \ / /
+ 1 \ / /
+ \/ /
+ /\ /
+ 1 / \ /
+ / \ R /
+ x1 --- x3 ---> x5
+ -1
+
+ */
+
+ // Create the layers
+ nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+ nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
+ nlr.addLayer( 2, NLR::Layer::RELU, 2 );
+ nlr.addLayer( 3, NLR::Layer::MAX, 1 );
+ nlr.addLayer( 4, NLR::Layer::WEIGHTED_SUM, 1 );
+
+ // Mark layer dependencies
+ for ( unsigned i = 1; i <= 4; ++i )
+ nlr.addLayerDependency( i - 1, i );
+
+ // Set the weights and biases for the weighted sum layers
+ nlr.setWeight( 0, 0, 1, 0, 1 );
+ nlr.setWeight( 0, 0, 1, 1, 1 );
+ nlr.setWeight( 0, 1, 1, 0, 1 );
+ nlr.setWeight( 0, 1, 1, 1, -1 );
+ nlr.setWeight( 3, 0, 4, 0, 2 );
+
+ // Mark the ReLU sources
+ nlr.addActivationSource( 1, 0, 2, 0 );
+ nlr.addActivationSource( 1, 1, 2, 1 );
+
+ // Mark the Max sources
+ nlr.addActivationSource( 2, 0, 3, 0 );
+ nlr.addActivationSource( 2, 1, 3, 0 );
+
+ // Variable indexing
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 7 );
+
+ // Very loose bounds for neurons except inputs
+ double large = 1000000;
+
+ tableau.getBoundManager().initialize( 8 );
+ tableau.setLowerBound( 2, -large );
+ tableau.setUpperBound( 2, 
large );
+ tableau.setLowerBound( 3, -large );
+ tableau.setUpperBound( 3, large );
+ tableau.setLowerBound( 4, -large );
+ tableau.setUpperBound( 4, large );
+ tableau.setLowerBound( 5, -large );
+ tableau.setUpperBound( 5, large );
+ tableau.setLowerBound( 6, -large );
+ tableau.setUpperBound( 6, large );
+ tableau.setLowerBound( 7, -large );
+ tableau.setUpperBound( 7, large );
+ }
+
+ void populateNetworkSBTSoftmax( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
+ {
+ /*
+
+
+ x0 x3 S x6
+
+ x1 x4 S x7
+
+ x2 x5 S x8
+
+ x3 = x0 - x1 + x2 + 1
+ x4 = -x0 + x1 + x2 + 2
+ x5 = -x0 - x1 - x2 + 3
+
+ x6 x7 x8 = softmax(x3, x4, x5)
+
+ x9 = x6 + x7 + x8
+ x10 = -x6 - x7 - x8
+
+ */
+
+ // Create the layers
+ nlr.addLayer( 0, NLR::Layer::INPUT, 3 );
+ nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 );
+ nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 );
+ nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 );
+
+ // Mark layer dependencies
+ for ( unsigned i = 1; i <= 3; ++i )
+ nlr.addLayerDependency( i - 1, i );
+
+ // Set the weights and biases for the weighted sum layers
+ nlr.setWeight( 0, 0, 1, 0, 1 );
+ nlr.setWeight( 0, 0, 1, 1, -1 );
+ nlr.setWeight( 0, 0, 1, 2, -1 );
+ nlr.setWeight( 0, 1, 1, 0, -1 );
+ nlr.setWeight( 0, 1, 1, 1, 1 );
+ nlr.setWeight( 0, 1, 1, 2, -1 );
+ nlr.setWeight( 0, 2, 1, 0, 1 );
+ nlr.setWeight( 0, 2, 1, 1, 1 );
+ nlr.setWeight( 0, 2, 1, 2, -1 );
+ nlr.setWeight( 2, 0, 3, 0, 1 );
+ nlr.setWeight( 2, 1, 3, 0, 1 );
+ nlr.setWeight( 2, 2, 3, 0, 1 );
+ nlr.setWeight( 2, 0, 3, 1, -1 );
+ nlr.setWeight( 2, 1, 3, 1, -1 );
+ nlr.setWeight( 2, 2, 3, 1, -1 );
+
+ nlr.setBias( 1, 0, 1 );
+ nlr.setBias( 1, 1, 2 );
+ nlr.setBias( 1, 2, 3 );
+
+ nlr.addActivationSource( 1, 0, 2, 0 );
+ nlr.addActivationSource( 1, 1, 2, 0 );
+ nlr.addActivationSource( 1, 2, 2, 0 );
+ nlr.addActivationSource( 1, 0, 2, 1 );
+ nlr.addActivationSource( 1, 1, 2, 1 );
+ nlr.addActivationSource( 1, 2, 2, 1 );
+ nlr.addActivationSource( 1, 0, 2, 2 );
+ nlr.addActivationSource( 1, 1, 2, 2 );
+ nlr.addActivationSource( 1, 2, 2, 2 );
+
+ // Variable indexing
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 8 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 9 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 );
+
+ // Very loose bounds for neurons except inputs
+ double large = 1000000;
+
+ tableau.getBoundManager().initialize( 11 );
+ tableau.setLowerBound( 3, -large );
+ tableau.setUpperBound( 3, large );
+ tableau.setLowerBound( 4, -large );
+ tableau.setUpperBound( 4, large );
+ tableau.setLowerBound( 5, -large );
+ tableau.setUpperBound( 5, large );
+ tableau.setLowerBound( 6, -large );
+ tableau.setUpperBound( 6, large );
+ tableau.setLowerBound( 7, -large );
+ tableau.setUpperBound( 7, large );
+ tableau.setLowerBound( 8, -large );
+ tableau.setUpperBound( 8, large );
+ tableau.setLowerBound( 9, -large );
+ tableau.setUpperBound( 9, large );
+ tableau.setLowerBound( 10, -large );
+ tableau.setUpperBound( 10, large );
+ }
+
+ void populateNetworkSBTSoftmax2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
+ {
+ /*
+
+
+ x0 x3 S x8
+
+ x1 
x4 S x9 + + x2 x5 S x10 + + x6 S x11 + + x7 S x12 + + x3 = x0 - x1 + x2 + 1 + x4 = -x0 + x1 + x2 + 2 + x5 = -x0 - x1 - x2 + 3 + x6 = -x0 - x1 - x2 + 2 + x7 = -x0 - x1 - x2 + 1 + + x8 x10 x12 = softmax(x3, x5, x7) + + x9 x11 = softmax(x4, x6) + + x13 = x8 + x10 + x12 + x14 = -x8 - x10 - x12 + x15 = x9 + x11 + x16 = -x9 - x11 + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 5 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 5 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 4 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, -1 ); + nlr.setWeight( 0, 0, 1, 2, -1 ); + nlr.setWeight( 0, 0, 1, 3, -1 ); + nlr.setWeight( 0, 0, 1, 4, -1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 2, -1 ); + nlr.setWeight( 0, 1, 1, 3, -1 ); + nlr.setWeight( 0, 1, 1, 4, -1 ); + nlr.setWeight( 0, 2, 1, 0, 1 ); + nlr.setWeight( 0, 2, 1, 1, 1 ); + nlr.setWeight( 0, 2, 1, 2, -1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + nlr.setWeight( 0, 2, 1, 4, -1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 4, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + nlr.setWeight( 2, 4, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 2, 1 ); + nlr.setWeight( 2, 3, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 3, -1 ); + nlr.setWeight( 2, 3, 3, 3, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 1, 1, 2 ); + nlr.setBias( 1, 2, 3 ); + nlr.setBias( 1, 3, 2 ); + nlr.setBias( 1, 4, 1 ); + + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 4, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 4, 2, 2 ); + nlr.addActivationSource( 1, 0, 2, 4 ); + nlr.addActivationSource( 1, 2, 2, 4 ); + nlr.addActivationSource( 1, 4, 2, 4 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 3, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 3 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 4 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 4 ), 12 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 13 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 3 ), 16 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 17 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + 
tableau.setUpperBound( 5, large );
+ tableau.setLowerBound( 6, -large );
+ tableau.setUpperBound( 6, large );
+ tableau.setLowerBound( 7, -large );
+ tableau.setUpperBound( 7, large );
+ tableau.setLowerBound( 8, -large );
+ tableau.setUpperBound( 8, large );
+ tableau.setLowerBound( 9, -large );
+ tableau.setUpperBound( 9, large );
+ tableau.setLowerBound( 10, -large );
+ tableau.setUpperBound( 10, large );
+ tableau.setLowerBound( 11, -large );
+ tableau.setUpperBound( 11, large );
+ tableau.setLowerBound( 12, -large );
+ tableau.setUpperBound( 12, large );
+ tableau.setLowerBound( 13, -large );
+ tableau.setUpperBound( 13, large );
+ tableau.setLowerBound( 14, -large );
+ tableau.setUpperBound( 14, large );
+ tableau.setLowerBound( 15, -large );
+ tableau.setUpperBound( 15, large );
+ tableau.setLowerBound( 16, -large );
+ tableau.setUpperBound( 16, large );
+ }
+
+ void populateNetworkSBTBilinear( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
+ {
+ /*
+
+
+ x0 x2
+ x x4 -- x5
+ x1 x3
+
+ x2 = x0 - 2 * x1
+ x3 = x0 + x1
+
+ x4 = x2 * x3
+
+ x5 = -x4
+ */
+
+ // Create the layers
+ nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+ nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
+ nlr.addLayer( 2, NLR::Layer::BILINEAR, 1 );
+ nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );
+
+ // Mark layer dependencies
+ for ( unsigned i = 1; i <= 3; ++i )
+ nlr.addLayerDependency( i - 1, i );
+
+ // Set the weights and biases for the weighted sum layers
+ nlr.setWeight( 0, 0, 1, 0, 1 );
+ nlr.setWeight( 0, 0, 1, 1, 1 );
+ nlr.setWeight( 0, 1, 1, 0, -2 );
+ nlr.setWeight( 0, 1, 1, 1, 1 );
+ nlr.setWeight( 2, 0, 3, 0, -1 );
+
+ nlr.addActivationSource( 1, 0, 2, 0 );
+ nlr.addActivationSource( 1, 1, 2, 0 );
+
+ // Variable indexing
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 5 );
+
+ // Very loose bounds for neurons except inputs
+ double large = 1000000;
+
+ tableau.getBoundManager().initialize( 6 );
+ tableau.setLowerBound( 2, -large );
+ tableau.setUpperBound( 2, large );
+ tableau.setLowerBound( 3, -large );
+ tableau.setUpperBound( 3, large );
+ tableau.setLowerBound( 4, -large );
+ tableau.setUpperBound( 4, large );
+ tableau.setLowerBound( 5, -large );
+ tableau.setUpperBound( 5, large );
+ }
+
+ void populateNetworkBackwardReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
+ {
+ /*
+
+ 1 R -1 R -1 2
+ x0 --- x2 ---> x4 --- x6 ---> x8 --- x10
+ \ / \ / \ /
+ 1 \ / 2 \ / 1 \ /
+ \/ \/ \/
+ /\ /\ /\
+ -1 / \ 1 / \ -1 / \
+ / \ R / \ R / \
+ x1 --- x3 ---> x5 --- x7 ---> x9 --- x11
+ 1 -1 2
+
+ The example described in Fig. 
2 of
+ https://dl.acm.org/doi/10.1145/3563325
+ using ReLU activation instead of LeakyReLU
+ */
+
+ // Create the layers
+ nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+ nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
+ nlr.addLayer( 2, NLR::Layer::RELU, 2 );
+ nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 );
+ nlr.addLayer( 4, NLR::Layer::RELU, 2 );
+ nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 );
+
+ // Mark layer dependencies
+ for ( unsigned i = 1; i <= 5; ++i )
+ nlr.addLayerDependency( i - 1, i );
+
+ // Set the weights and biases for the weighted sum layers
+ nlr.setWeight( 0, 0, 1, 0, 1 );
+ nlr.setWeight( 0, 0, 1, 1, 1 );
+ nlr.setWeight( 0, 1, 1, 0, -1 );
+ nlr.setWeight( 0, 1, 1, 1, 1 );
+
+ nlr.setWeight( 2, 0, 3, 0, -1 );
+ nlr.setWeight( 2, 0, 3, 1, 2 );
+ nlr.setWeight( 2, 1, 3, 0, 1 );
+ nlr.setWeight( 2, 1, 3, 1, -1 );
+
+ nlr.setWeight( 4, 0, 5, 0, -1 );
+ nlr.setWeight( 4, 0, 5, 1, 1 );
+ nlr.setWeight( 4, 1, 5, 0, -1 );
+ nlr.setWeight( 4, 1, 5, 1, 2 );
+
+ nlr.setBias( 5, 1, 2 );
+
+ // Mark the ReLU sources
+ nlr.addActivationSource( 1, 0, 2, 0 );
+ nlr.addActivationSource( 1, 1, 2, 1 );
+
+ nlr.addActivationSource( 3, 0, 4, 0 );
+ nlr.addActivationSource( 3, 1, 4, 1 );
+
+ // Variable indexing
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 );
+
+ // Very loose bounds for neurons except inputs
+ double large = 1000000;
+
+ tableau.getBoundManager().initialize( 12 );
+ tableau.setLowerBound( 2, -large );
+ tableau.setUpperBound( 2, large );
+ tableau.setLowerBound( 3, -large );
+ tableau.setUpperBound( 3, large );
+ tableau.setLowerBound( 4, -large );
+ tableau.setUpperBound( 4, large );
+ tableau.setLowerBound( 5, -large );
+ tableau.setUpperBound( 5, large );
+ tableau.setLowerBound( 6, -large );
+ tableau.setUpperBound( 6, large );
+ tableau.setLowerBound( 7, -large );
+ tableau.setUpperBound( 7, large );
+ tableau.setLowerBound( 8, -large );
+ tableau.setUpperBound( 8, large );
+ tableau.setLowerBound( 9, -large );
+ tableau.setUpperBound( 9, large );
+ tableau.setLowerBound( 10, -large );
+ tableau.setUpperBound( 10, large );
+ tableau.setLowerBound( 11, -large );
+ tableau.setUpperBound( 11, large );
+ }
+
+ void populateNetworkBackwardReLU2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
+ {
+ /*
+ x3 x7
+ x0 x11 x14
+ x4 x8 x17 x19
+ x1 x12 x15 x21
+ x5 x9 x18 x20
+ x2 x13 x16
+ x6 x10
+
+ x3 = -x0 + x1
+ x4 = x0 + 2*x1
+ x5 = x0 + x1 + x2
+ x6 = 3*x0 - 2*x1 - x2
+
+ x7 = ReLU( x3 )
+ x8 = ReLU( x4 )
+ x9 = ReLU( x5 )
+ x10 = ReLU( x6 )
+
+ x11 = 2x7 + x9 - x10 + 2
+ x12 = -x7 + 2x8 + x10
+ x13 = x7 - x8 + 2x9 - 2
+
+ x14 = ReLU( x11 )
+ x15 = ReLU( x12 )
+ x16 = ReLU( x13 )
+
+ x17 = -x14 + x15 - x16
+ x18 = x14 + x15 + x16
+
+ x19 = ReLU( x17 )
+ x20 = ReLU( x18 )
+
+ x21 = x19 - x20 - 1
+ */
+
+ // Create the layers
+ nlr.addLayer( 0, NLR::Layer::INPUT, 3 
); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::RELU, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::RELU, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + 
tableau.setUpperBound( 5, large );
+
tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardSigmoid( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 S -1 S -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ S / \ S / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 2 of + https://dl.acm.org/doi/10.1145/3563325 + using Sigmoid activation instead of LeakyReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the Sigmoid sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + 
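// ( Editor's note: these are variables 2 through 11; the input variables
+ // 0 and 1 are expected to be bounded by each individual test. )
+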
double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardSigmoid2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = Sigmoid( x3 ) + x8 = Sigmoid( x4 ) + x9 = Sigmoid( x5 ) + x10 = Sigmoid( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = Sigmoid( x11 ) + x15 = Sigmoid( x12 ) + x16 = Sigmoid( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = Sigmoid( x17 ) + x20 = Sigmoid( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::SIGMOID, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::SIGMOID, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the Sigmoid sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + 
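// ( Editor's note: the variable numbers here follow the x0 .. x21 naming
+ // used in the sketch at the top of this function. )
+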
nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardSign( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 Sign -1 Sign -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ Sign / \ Sign / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 
2 of
+ https://dl.acm.org/doi/10.1145/3563325
+ using Sign activation instead of LeakyReLU
+ */
+
+ // Create the layers
+ nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+ nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
+ nlr.addLayer( 2, NLR::Layer::SIGN, 2 );
+ nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 );
+ nlr.addLayer( 4, NLR::Layer::SIGN, 2 );
+ nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 );
+
+ // Mark layer dependencies
+ for ( unsigned i = 1; i <= 5; ++i )
+ nlr.addLayerDependency( i - 1, i );
+
+ // Set the weights and biases for the weighted sum layers
+ nlr.setWeight( 0, 0, 1, 0, 1 );
+ nlr.setWeight( 0, 0, 1, 1, 1 );
+ nlr.setWeight( 0, 1, 1, 0, -1 );
+ nlr.setWeight( 0, 1, 1, 1, 1 );
+
+ nlr.setWeight( 2, 0, 3, 0, -1 );
+ nlr.setWeight( 2, 0, 3, 1, 2 );
+ nlr.setWeight( 2, 1, 3, 0, 1 );
+ nlr.setWeight( 2, 1, 3, 1, -1 );
+
+ nlr.setWeight( 4, 0, 5, 0, -1 );
+ nlr.setWeight( 4, 0, 5, 1, 1 );
+ nlr.setWeight( 4, 1, 5, 0, -1 );
+ nlr.setWeight( 4, 1, 5, 1, 2 );
+
+ nlr.setBias( 5, 1, 2 );
+
+ // Mark the Sign sources
+ nlr.addActivationSource( 1, 0, 2, 0 );
+ nlr.addActivationSource( 1, 1, 2, 1 );
+
+ nlr.addActivationSource( 3, 0, 4, 0 );
+ nlr.addActivationSource( 3, 1, 4, 1 );
+
+ // Variable indexing
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 );
+
+ // Very loose bounds for neurons except inputs
+ double large = 1000000;
+
+ tableau.getBoundManager().initialize( 12 );
+ tableau.setLowerBound( 2, -large );
+ tableau.setUpperBound( 2, large );
+ tableau.setLowerBound( 3, -large );
+ tableau.setUpperBound( 3, large );
+ tableau.setLowerBound( 4, -large );
+ tableau.setUpperBound( 4, large );
+ tableau.setLowerBound( 5, -large );
+ tableau.setUpperBound( 5, large );
+ tableau.setLowerBound( 6, -large );
+ tableau.setUpperBound( 6, large );
+ tableau.setLowerBound( 7, -large );
+ tableau.setUpperBound( 7, large );
+ tableau.setLowerBound( 8, -large );
+ tableau.setUpperBound( 8, large );
+ tableau.setLowerBound( 9, -large );
+ tableau.setUpperBound( 9, large );
+ tableau.setLowerBound( 10, -large );
+ tableau.setUpperBound( 10, large );
+ tableau.setLowerBound( 11, -large );
+ tableau.setUpperBound( 11, large );
+ }
+
+ void populateNetworkBackwardSign2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
+ {
+ /*
+ x3 x7
+ x0 x11 x14
+ x4 x8 x17 x19
+ x1 x12 x15 x21
+ x5 x9 x18 x20
+ x2 x13 x16
+ x6 x10
+
+ x3 = -x0 + x1
+ x4 = x0 + 2*x1
+ x5 = x0 + x1 + x2
+ x6 = 3*x0 - 2*x1 - x2
+
+ x7 = Sign( x3 )
+ x8 = Sign( x4 )
+ x9 = Sign( x5 )
+ x10 = Sign( x6 )
+
+ x11 = 2x7 + x9 - x10 + 2
+ x12 = -x7 + 2x8 + x10
+ x13 = x7 - x8 + 2x9 - 2
+
+ x14 = Sign( x11 )
+ x15 = Sign( x12 )
+ x16 = Sign( x13 )
+
+ x17 = -x14 + x15 - x16
+ x18 = x14 + x15 + x16
+
+ x19 = Sign( x17 )
+ x20 = Sign( x18 )
+
+ x21 = x19 - x20 - 1
+ */
+
+ // Create the layers
+ nlr.addLayer( 0, NLR::Layer::INPUT, 3 );
+ nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 );
+ nlr.addLayer( 2, 
NLR::Layer::SIGN, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::SIGN, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + 
tableau.setUpperBound( 6, large );
+
tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardRound( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 Rnd -1 Rnd -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ Rnd / \ Rnd / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 2 of + https://dl.acm.org/doi/10.1145/3563325 + using Round activation instead of LeakyReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ROUND, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the Round sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + 
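// ( Editor's note: 12 variables, matching x0 .. x11 in the sketch above;
+ // only the non-input variables receive the loose bounds below. )
+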
tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardRound2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = Round( x3 ) + x8 = Round( x4 ) + x9 = Round( x5 ) + x10 = Round( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = Round( x11 ) + x15 = Round( x12 ) + x16 = Round( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = Round( x17 ) + x20 = Round( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::ROUND, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::ROUND, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::ROUND, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the Round sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); 
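+ // Note: this network numbers its variables consecutively, layer by layer ( x0 .. x21 ), unlike some earlier networks in this file, which interleave weighted-sum and activation variables.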
+ + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardAbs( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 A -1 A -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ A / \ A / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 
2 of + https://dl.acm.org/doi/10.1145/3563325 + using Absolute value activation instead of LeakyReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardAbs2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = Abs( x3 ) + x8 = Abs( x4 ) + x9 = Abs( x5 ) + x10 = Abs( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = Abs( x11 ) + x15 = Abs( x12 ) + x16 = Abs( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = Abs( x17 ) + x20 = Abs( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + 
nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, 
-large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardLeakyReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 LR -1 LR -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ LR / \ LR / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 2 of + https://dl.acm.org/doi/10.1145/3563325 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + nlr.getLayer( 4 )->setAlpha( 0.1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the LeakyReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for 
neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardLeakyRelu2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = LeakyReLU( x3 ) + x8 = LeakyReLU( x4 ) + x9 = LeakyReLU( x5 ) + x10 = LeakyReLU( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = LeakyReLU( x11 ) + x15 = LeakyReLU( x12 ) + x16 = LeakyReLU( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = LeakyReLU( x17 ) + x20 = LeakyReLU( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + nlr.getLayer( 4 )->setAlpha( 0.1 ); + nlr.getLayer( 6 )->setAlpha( 0.1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the LeakyReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + 
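+ // Hand-computed reference trace ( alpha = 0.1; for orientation only, not asserted by any test ): input ( x0, x1, x2 ) = ( 1, 0, 0 ) gives ( x3, x4, x5, x6 ) = ( -1, 1, 1, 3 ) and, after LeakyReLU, ( x7, x8, x9, x10 ) = ( -0.1, 1, 1, 3 ). + // Then ( x11, x12, x13 ) = ( -0.2, 5.1, -1.1 ), ( x14, x15, x16 ) = ( -0.02, 5.1, -0.11 ), ( x17, x18 ) = ( 5.23, 4.97 ), ( x19, x20 ) = ( 5.23, 4.97 ) and x21 = 5.23 - 4.97 - 1 = -0.74.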
nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) + { + /* + a a' + x e + b b' f h + y d + c c' + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::MAX, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the 
weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Softmax/Max sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + + void populateNetworkBackwardSoftmaxAndMax2( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 + x1 x12 x15 x17 + x5 x9 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7, x8, x9, x10 = Softmax( x3, x4, x5, x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14, x15, x16 = Softmax( x11, x12, x13 ) + + x17 = Max( x14, x15, x16 ) + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 5, NLR::Layer::MAX, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); 
+ nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + + // Mark the Softmax/Max sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 3, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 3, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 2 ); + nlr.addActivationSource( 1, 0, 2, 3 ); + nlr.addActivationSource( 1, 1, 2, 3 ); + nlr.addActivationSource( 1, 2, 2, 3 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + nlr.addActivationSource( 3, 2, 4, 0 ); + nlr.addActivationSource( 3, 0, 4, 1 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 1 ); + nlr.addActivationSource( 3, 0, 4, 2 ); + nlr.addActivationSource( 3, 1, 4, 2 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 4, 0, 5, 0 ); + nlr.addActivationSource( 4, 1, 5, 0 ); + nlr.addActivationSource( 4, 2, 5, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 18 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 
10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + } + + void populateNetworkBackwardReluAndBilinear( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) + { + /* + a a' + x e + b b' f h + y d + c c' + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the ReLU/Bilinear sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardReluAndBilinear2( 
NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) + { + /* + x3 x7 + x0 + x4 x8 x11 x13 + x1 x15 + x5 x9 x12 x14 + x2 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = ReLU( x3 ) + x8 = ReLU( x4 ) + x9 = ReLU( x5 ) + x10 = ReLU( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + + x13 = ReLU( x11 ) + x14 = ReLU( x12 ) + + x15 = x13 * x14 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::RELU, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::BILINEAR, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setBias( 3, 0, 2 ); + + // Mark the ReLU/Bilinear sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + nlr.addActivationSource( 4, 0, 5, 0 ); + nlr.addActivationSource( 4, 1, 5, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 13 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 14 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 15 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 16 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 
11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + } + + void test_evaluate_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetwork( nlr ); + + double input[2]; + double output[2]; + + // With ReLUs, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With ReLUs, case 1 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 1 ) ); + + // With ReLUs, case 2 + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 0 ) ); + } + + void test_evaluate_sigmoids() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSigmoids( nlr ); + + double input[2]; + double output[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.6750, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 3.0167, 0.0001 ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.6032, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.5790, 0.0001 ) ); + + // case 3 + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.5045, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.1957, 0.0001 ) ); + } + + void test_evaluate_abs() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithAbs( nlr ); + + double input[2]; + double output[2]; + + // With Abs, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Abs, case 1 + input[0] = -2; + input[1] = -2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Abs, case 2 + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 10 ) ); + } + + void test_evaluate_sign() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSign( nlr ); + + double input[2]; + double output[2]; + + // With Sign, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Sign, case 1 + input[0] = -2; + input[1] = -2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) 
); + + // With Sign, case 2 (0 considered "non-negative", sign(0)=1) + input[0] = -1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -4 ) ); + } + + void test_evaluate_round() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithRound( nlr ); + + double input[2]; + double output[2]; + + // With Round, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Round, case 1 + input[0] = 2.1; + input[1] = 1.4; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 2, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); + + // With Round, case 2 + input[0] = 2.1; + input[1] = 1.6; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -12, 0.0001 ) ); + } + + void test_evaluate_leaky_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithLeakyRelu( nlr ); + + double input[2]; + double output[2]; + + // With Leaky ReLU, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Leaky ReLU, case 1 (alpha=0.1) + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.9, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 0.57, 0.0001 ) ); + + // With Leaky ReLU, case 2 + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -0.04, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -0.76, 0.0001 ) ); + } + + void test_evaluate_max() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithMax( nlr ); + + double input[2]; + double output[1]; + + // With Max, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -3 ) ); + + // With Max, case 1 + input[0] = 1; + input[1] = -3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -18 ) ); + + // With Max, case 2 + input[0] = -3; + input[1] = 3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -5 ) ); + } + + + void test_evaluate_softmax() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSoftmax( nlr ); + + double input[2]; + double output[2]; + + // With Softmax, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.2999, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.4001, 0.0001 ) ); + + // With Softmax, case 1 + input[0] = 1; + input[1] = -3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.7615, 0.0001 ) ); + + // With Softmax, 
case 2 + input[0] = -3; + input[1] = 3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.1206, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.7588, 0.0001 ) ); + } + + void test_evaluate_bilinear() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithBilinear( nlr ); + + double input[2]; + double output[1]; + + // With Bilinear, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); + + // With Bilinear, case 1 + input[0] = 0.1; + input[1] = -0.3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 2.8304, 0.0001 ) ); + + // With Bilinear, case 2 + input[0] = -0.3; + input[1] = 0.3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.0912, 0.0001 ) ); + } + + void test_evaluate_non_consecutive_layers() + { + NLR::NetworkLevelReasoner nlr; + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + nlr.addLayerDependency( 0, 1 ); + nlr.addLayerDependency( 1, 2 ); + nlr.addLayerDependency( 2, 3 ); + nlr.addLayerDependency( 0, 3 ); + nlr.addLayerDependency( 3, 4 ); + nlr.addLayerDependency( 0, 4 ); + nlr.addLayerDependency( 4, 5 ); + + // Set the weights and ReLUs + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, 2 ); + nlr.setWeight( 2, 2, 3, 1, -2 ); + nlr.setWeight( 0, 1, 3, 1, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 0, 0, 4, 2 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 2, 5, 0, 1 ); + + // Evaluate + double input[2]; + double output; + + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); + TS_ASSERT( FloatUtils::areEqual( output, 2 ) ); + + input[0] = -1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); + TS_ASSERT( FloatUtils::areEqual( output, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.computeSuccessorLayers() ); + + TS_ASSERT_EQUALS( nlr.getLayer( 0 )->getSuccessorLayers(), Set<unsigned>( { 1, 3, 4 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 1 )->getSuccessorLayers(), Set<unsigned>( { 2 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 2 )->getSuccessorLayers(), Set<unsigned>( { 3 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 3 )->getSuccessorLayers(), Set<unsigned>( { 4 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getSuccessorLayers(), Set<unsigned>( { 5 } ) ); + } + + void test_evaluate_abs_and_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithAbsAndRelu( nlr ); + + double input[2]; + double output[2]; + + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); + + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( 
nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + } + + void test_evaluate_round_and_sign() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithRoundAndSign( nlr ); + + double input[2]; + double output[2]; + + input[0] = 1.6; + input[1] = 1.4; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -2, 0.0001 ) ); + + input[0] = 1.6; + input[1] = 1.6; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -1, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); + } + + void test_evaluate_leaky_relu_and_sigmoid() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithLeakyReluAndSigmoid( nlr ); + + double input[2]; + double output[2]; + + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.7109, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 1.4602, 0.0001 ) ); + + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.4013, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 0.6508, 0.0001 ) ); + } + + void test_evaluate_softmax_and_max() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSoftmaxAndMax( nlr ); + + double input[2]; + double output[1]; + + input[0] = 1; + input[1] = -3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -2.9998, 0.0001 ) ); + + input[0] = -3; + input[1] = 3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -1.0000, 0.0001 ) ); + } + + void test_evaluate_relu_and_bilinear() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithReluAndBilinear( nlr ); + + double input[2]; + double output[1]; + + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); + } + + void test_store_into_other() + { + NLR::NetworkLevelReasoner nlr; + + populateNetwork( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + // With ReLUs, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // With ReLUs, case 1 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 
) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_sigmoids() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSigmoids( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_round() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithRound( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_sign() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSign( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_abs() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithAbs( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( 
FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_leaky_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithLeakyRelu( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_max() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithMax( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[1]; + double output2[1]; + + // case 1 (the Max network has a single output, so only index 0 is compared) + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + } + + void test_store_into_other_with_softmax() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSoftmax( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_bilinear() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithBilinear( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[1]; + double output2[1]; + + // case 1 (the Bilinear network also has a single output) + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( 
FloatUtils::areEqual( output1[0], output2[0] ) );
+        TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) );
+    }
+
+    void test_generate_input_query()
+    {
+        NLR::NetworkLevelReasoner nlr;
+
+        // Create the layers
+        nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 );
+        nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 );
+        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 );
+        nlr.addLayer( 4, NLR::Layer::RELU, 2 );
+        nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 );
+
+        // Mark layer dependencies
+        for ( unsigned i = 1; i <= 5; ++i )
+            nlr.addLayerDependency( i - 1, i );
+
+        // Variable indexing
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 5 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 6 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 );
+
+        // Set the weights and biases for the weighted sum layers
+
+        nlr.setWeight( 0, 0, 1, 0, 1 );
+        nlr.setWeight( 0, 0, 1, 1, 2 );
+        nlr.setWeight( 0, 1, 1, 1, -3 );
+        nlr.setWeight( 0, 1, 1, 2, 1 );
+
+        nlr.setWeight( 2, 0, 3, 0, 1 );
+        nlr.setWeight( 2, 0, 3, 1, -1 );
+        nlr.setWeight( 2, 1, 3, 0, 1 );
+        nlr.setWeight( 2, 1, 3, 1, 1 );
+        nlr.setWeight( 2, 2, 3, 0, -1 );
+        nlr.setWeight( 2, 2, 3, 1, -5 );
+
+        nlr.setWeight( 4, 0, 5, 0, 1 );
+        nlr.setWeight( 4, 0, 5, 1, 1 );
+        nlr.setWeight( 4, 1, 5, 1, 3 );
+
+        nlr.setBias( 1, 0, 1 );
+        nlr.setBias( 1, 1, 0 );
+        nlr.setBias( 1, 2, 0 );
+
+        nlr.setBias( 3, 0, 0 );
+        nlr.setBias( 3, 1, 2 );
+
+        nlr.setBias( 5, 0, 0 );
+        nlr.setBias( 5, 1, 0 );
+
+        // Mark the ReLU/Abs sources
+        nlr.addActivationSource( 1, 0, 2, 0 );
+        nlr.addActivationSource( 1, 1, 2, 1 );
+        nlr.addActivationSource( 1, 2, 2, 2 );
+
+        nlr.addActivationSource( 3, 0, 4, 0 );
+        nlr.addActivationSource( 3, 1, 4, 1 );
+
+        // Start the testing
+        Query ipq;
+        nlr.generateQuery( ipq );
+        List<Equation> unhandledEquations;
+        Set<unsigned> varsInUnhandledConstraints;
+        TS_ASSERT(
+            ipq.constructNetworkLevelReasoner( unhandledEquations, varsInUnhandledConstraints ) );
+        NLR::NetworkLevelReasoner *reconstructedNlr = ipq.getNetworkLevelReasoner();
+
+        double input[2];
+        double output[2];
+
+        input[0] = 1;
+        input[1] = 1;
+
+        TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) );
+
+        TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) );
+        TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) );
+
+        input[0] = 1;
+        input[1] = 2;
+
+        TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) );
+
+        TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) );
+        TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) );
+    }
+
+    void test_simulate_relu()
+    {
+        NLR::NetworkLevelReasoner nlr;
+
+        populateNetwork( nlr );
+
+        unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS );
+
+        // With ReLUs, inputs are zeros, only biases count
+        Vector<Vector<double>> simulations1;
+        simulations1.append( Vector<double>( simulationSize, 0 ) );
+        simulations1.append( Vector<double>( simulationSize, 0 ) );
+
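+        // Quick trace of this case: with both inputs at zero only the biases
+        // survive the first weighted sum, so layer 1 evaluates to ( 1, 0, 0 ),
+        // layer 3 to ( 1, 1 ), and the output layer to ( 1, 4 ), which is what
+        // the loop below asserts for every simulation sample.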
TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With ReLUs, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 1 ) ); + } + + // With ReLUs, case 1 and 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 1 ) ); + simulations3.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 0 ) ); + } + } + + void test_simulate_sigmoids() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSigmoids( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // case 1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.6750, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 3.0167, + 0.0001 ) ); + } + + // case 2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.6032, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.5790, + 0.0001 ) ); + } + + // case 3 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 1 ) ); + simulations3.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.5045, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.1957, + 0.0001 ) ); + } + } + + void test_simulate_round() + { + 
NLR::NetworkLevelReasoner nlr; + + populateNetworkWithRound( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Round, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Round, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 2.1 ) ); + simulations2.append( Vector( simulationSize, 1.4 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 2, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -4, + 0.0001 ) ); + } + + // With Round, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 2.1 ) ); + simulations3.append( Vector( simulationSize, 1.6 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -12, + 0.0001 ) ); + } + } + + void test_simulate_sign() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSign( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Sign, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Sign, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, -2 ) ); + simulations2.append( Vector( simulationSize, -2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Sign, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, -1 ) ); + simulations3.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; 
++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -4 ) ); + } + } + + void test_simulate_abs() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithAbs( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Abs, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Abs, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, -2 ) ); + simulations2.append( Vector( simulationSize, -2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Abs, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 1 ) ); + simulations3.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 4 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 10 ) ); + } + } + + void test_simulate_leaky_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithLeakyRelu( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Leaky ReLU, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Leaky ReLU, case 1 (alpha=0.1) + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.9, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 
)->getSimulations() ) ) + .get( 1 ) + .get( i ), + 0.57, + 0.0001 ) ); + } + + // With Leaky ReLU, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 1 ) ); + simulations3.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -0.04, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -0.76, + 0.0001 ) ); + } + } + + void test_simulate_max() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithMax( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Max, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -3 ) ); + } + + // With Max, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, -3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -18 ) ); + } + + // With Max, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, -3 ) ); + simulations3.append( Vector( simulationSize, 3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -5 ) ); + } + } + + void test_simulate_softmax() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSoftmax( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Softmax, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.2999, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.4001, + 0.0001 ) ); + } + + // With Softmax, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, -3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.1192, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + 
.get( i ), + 2.7615, + 0.0001 ) ); + } + + // With Softmax, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, -3 ) ); + simulations3.append( Vector( simulationSize, 3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.1206, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.7588, + 0.0001 ) ); + } + } + + void test_simulate_bilinear() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithBilinear( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Bilinear, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0 ) ); + } + + // With Bilinear, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 0.1 ) ); + simulations2.append( Vector( simulationSize, -0.3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 2.8304, + 0.0001 ) ); + } + + // With Bilinear, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, -0.3 ) ); + simulations3.append( Vector( simulationSize, 0.3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.0912, + 0.0001 ) ); + } + } + + void test_simulate_non_consecutive_layers() + { + NLR::NetworkLevelReasoner nlr; + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + nlr.addLayerDependency( 0, 1 ); + nlr.addLayerDependency( 1, 2 ); + nlr.addLayerDependency( 2, 3 ); + nlr.addLayerDependency( 0, 3 ); + nlr.addLayerDependency( 3, 4 ); + nlr.addLayerDependency( 0, 4 ); + nlr.addLayerDependency( 4, 5 ); + + // Set the weights and relus + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, 2 ); + nlr.setWeight( 2, 2, 3, 1, -2 ); + nlr.setWeight( 0, 1, 3, 1, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 0, 0, 4, 2 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 2, 5, 0, 1 ); + + unsigned simulationSize = 
Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // Simulate1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 1 ) ); + simulations1.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 2 ) ); + + // Simulate2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, -1 ) ); + simulations2.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0 ) ); + } + + void test_simulate_abs_and_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithAbsAndRelu( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // Simulate1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 1 ) ); + simulations1.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 2 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2 ) ); + } + + // Simulate2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 4 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + } + + void test_simulate_round_and_sign() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithRoundAndSign( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Round/Sign, case 1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 1.6 ) ); + simulations1.append( Vector( simulationSize, 1.4 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -2 ) ); + } + + // With Round/Sign, case 2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1.6 ) ); + simulations2.append( Vector( simulationSize, 1.6 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -4 ) ); + 
        }
+    }
+
+    void test_simulate_leaky_relu_and_sigmoid()
+    {
+        NLR::NetworkLevelReasoner nlr;
+
+        populateNetworkWithLeakyReluAndSigmoid( nlr );
+
+        unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS );
+
+        // With LeakyReLU/Sigmoid, case 1
+        Vector<Vector<double>> simulations1;
+        simulations1.append( Vector<double>( simulationSize, 1 ) );
+        simulations1.append( Vector<double>( simulationSize, 1 ) );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) );
+
+        for ( unsigned i = 0; i < simulationSize; ++i )
+        {
+            TS_ASSERT( FloatUtils::areEqual(
+                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+                    .get( 0 )
+                    .get( i ),
+                0.7109,
+                0.0001 ) );
+            TS_ASSERT( FloatUtils::areEqual(
+                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+                    .get( 1 )
+                    .get( i ),
+                1.4602,
+                0.0001 ) );
+        }
+
+        // With LeakyReLU/Sigmoid, case 2
+        Vector<Vector<double>> simulations2;
+        simulations2.append( Vector<double>( simulationSize, 1 ) );
+        simulations2.append( Vector<double>( simulationSize, 2 ) );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) );
+
+        for ( unsigned i = 0; i < simulationSize; ++i )
+        {
+            TS_ASSERT( FloatUtils::areEqual(
+                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+                    .get( 0 )
+                    .get( i ),
+                0.4013,
+                0.0001 ) );
+            TS_ASSERT( FloatUtils::areEqual(
+                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+                    .get( 1 )
+                    .get( i ),
+                0.6508,
+                0.0001 ) );
+        }
+    }
+
+    void test_simulate_softmax_and_max()
+    {
+        NLR::NetworkLevelReasoner nlr;
+
+        populateNetworkWithSoftmaxAndMax( nlr );
+
+        unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS );
+
+        // With Softmax/Max, case 1
+        Vector<Vector<double>> simulations1;
+        simulations1.append( Vector<double>( simulationSize, 1 ) );
+        simulations1.append( Vector<double>( simulationSize, -3 ) );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) );
+
+        for ( unsigned i = 0; i < simulationSize; ++i )
+        {
+            TS_ASSERT( FloatUtils::areEqual(
+                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+                    .get( 0 )
+                    .get( i ),
+                -2.9998,
+                0.0001 ) );
+        }
+
+        // With Softmax/Max, case 2
+        Vector<Vector<double>> simulations2;
+        simulations2.append( Vector<double>( simulationSize, -3 ) );
+        simulations2.append( Vector<double>( simulationSize, 3 ) );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) );
+
+        for ( unsigned i = 0; i < simulationSize; ++i )
+        {
+            TS_ASSERT( FloatUtils::areEqual(
+                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+                    .get( 0 )
+                    .get( i ),
+                -1,
+                0.0001 ) );
+        }
+    }
+
+    void test_simulate_relu_and_bilinear()
+    {
+        NLR::NetworkLevelReasoner nlr;
+
+        populateNetworkWithReluAndBilinear( nlr );
+
+        unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS );
+
+        // With ReLU/Bilinear, case 1
+        Vector<Vector<double>> simulations1;
+        simulations1.append( Vector<double>( simulationSize, 1 ) );
+        simulations1.append( Vector<double>( simulationSize, 1 ) );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) );
+
+        for ( unsigned i = 0; i < simulationSize; ++i )
+        {
+            TS_ASSERT( FloatUtils::areEqual(
+                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+                    .get( 0 )
+                    .get( i ),
+                1 ) );
+        }
+
+        // With ReLU/Bilinear, case 2
+        Vector<Vector<double>> simulations2;
+        simulations2.append( Vector<double>( simulationSize, 1 ) );
+        simulations2.append( Vector<double>( simulationSize, 2 ) );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) );
+
+        for ( unsigned i = 0; i < simulationSize; ++i )
+        {
+            TS_ASSERT( FloatUtils::areEqual(
+                ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) )
+                    .get( 0 )
+                    .get( i ),
+                0 ) );
+        }
+    }
+
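+    // The tests below exercise intervalArithmeticBoundPropagation(): each one
+    // seeds concrete bounds on the input variables, widens every hidden and
+    // output variable to [-1000, 1000], and checks the tightenings reported
+    // through getConstraintTightenings().
+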
+    void test_interval_arithmetic_bound_propagation_relu_constraints()
+    {
+        NLR::NetworkLevelReasoner nlr;
+        populateNetwork( nlr );
+
+        MockTableau tableau;
+        tableau.getBoundManager().initialize( 14 );
+
+        // Initialize the bounds
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 1 );
+
+        double large = 1000;
+        tableau.setLowerBound( 2, -large );
+        tableau.setUpperBound( 2, large );
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+        tableau.setLowerBound( 7, -large );
+        tableau.setUpperBound( 7, large );
+        tableau.setLowerBound( 8, -large );
+        tableau.setUpperBound( 8, large );
+        tableau.setLowerBound( 9, -large );
+        tableau.setUpperBound( 9, large );
+        tableau.setLowerBound( 10, -large );
+        tableau.setUpperBound( 10, large );
+        tableau.setLowerBound( 11, -large );
+        tableau.setUpperBound( 11, large );
+        tableau.setLowerBound( 12, -large );
+        tableau.setUpperBound( 12, large );
+        tableau.setLowerBound( 13, -large );
+        tableau.setUpperBound( 13, large );
+
+        nlr.setTableau( &tableau );
+
+        // Initialize
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+
+        // Perform the tightening pass
+        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+
+            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
+
+            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
+            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
+
+            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ),
+            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ),
+
+            Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
+            Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ),
+
+            Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ),
+            Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
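+        // Spot check of the propagation: x2 = x0 + 1 over x0 in [-1, 1] gives
+        // [0, 2], and x4 = 2 * x0 - 3 * x1 gives [-5, 5]; the ReLU outputs x3
+        // and x5 are then clipped to [0, 2] and [0, 5], as listed above.
+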
+        // Change the current bounds
+        tableau.setLowerBound( 0, -3 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 2 );
+
+        tableau.setLowerBound( 2, -large );
+        tableau.setUpperBound( 2, large );
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+        tableau.setLowerBound( 7, -large );
+        tableau.setUpperBound( 7, large );
+        tableau.setLowerBound( 8, -large );
+        tableau.setUpperBound( 8, large );
+        tableau.setLowerBound( 9, -large );
+        tableau.setUpperBound( 9, large );
+        tableau.setLowerBound( 10, -large );
+        tableau.setUpperBound( 10, large );
+        tableau.setLowerBound( 11, -large );
+        tableau.setUpperBound( 11, large );
+        tableau.setLowerBound( 12, -large );
+        tableau.setUpperBound( 12, large );
+        tableau.setLowerBound( 13, -large );
+        tableau.setUpperBound( 13, large );
+
+        // Initialize
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+
+        // Perform the tightening pass
+        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
+
+        List<Tightening> expectedBounds2( {
+            Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+
+            Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
+
+            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
+            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
+
+            Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ),
+            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ),
+
+            Tightening( 10, -2, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
+            Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ),
+
+            Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ),
+            Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+    }
+
+    void test_interval_arithmetic_bound_propagation_abs_constraints()
+    {
+        NLR::NetworkLevelReasoner nlr;
+
+        // Create the layers
+        nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 );
+        nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 );
+        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 );
+        nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 );
+        nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 );
+
+        // Mark layer dependencies
+        for ( unsigned i = 1; i <= 5; ++i )
+            nlr.addLayerDependency( i - 1, i );
+
+        // Set the weights and biases for the weighted sum layers
+        nlr.setWeight( 0, 0, 1, 0, 1 );
+        nlr.setWeight( 0, 0, 1, 1, 2 );
+        nlr.setWeight( 0, 1, 1, 1, -3 );
+        nlr.setWeight( 0, 1, 1, 2, 1 );
+
+        nlr.setWeight( 2, 0, 3, 0, 1 );
+        nlr.setWeight( 2, 0, 3, 1, -1 );
+        nlr.setWeight( 2, 1, 3, 0, 1 );
+        nlr.setWeight( 2, 1, 3, 1, 1 );
+        nlr.setWeight( 2, 2, 3, 0, -1 );
+        nlr.setWeight( 2, 2, 3, 1, -1 );
+
+        nlr.setWeight( 4, 0, 5, 0, 1 );
+        nlr.setWeight( 4, 0, 5, 1, 1 );
+        nlr.setWeight( 4, 1, 5, 1, 3 );
+
+        nlr.setBias( 1, 0, 1 );
+        nlr.setBias( 3, 1, 2 );
+
+        // Mark the Abs sources
+        nlr.addActivationSource( 1, 0, 2, 0 );
+        nlr.addActivationSource( 1, 1, 2, 1 );
+        nlr.addActivationSource( 1, 2, 2, 2 );
+
+        nlr.addActivationSource( 3, 0, 4, 0 );
+        nlr.addActivationSource( 3, 1, 4, 1 );
+
+        // Variable indexing
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 );
+
+        MockTableau tableau;
+        tableau.getBoundManager().initialize( 14 );
+
+        // Initialize the bounds
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 2 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 2 );
+
+        double large = 1000;
+        tableau.setLowerBound( 2, -large );
+        tableau.setUpperBound( 2, large );
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+        tableau.setLowerBound( 7, -large );
+        tableau.setUpperBound( 7, large );
+        tableau.setLowerBound( 8, -large );
+        tableau.setUpperBound( 8, large );
+        tableau.setLowerBound( 9, -large );
+        tableau.setUpperBound( 9, large );
+        tableau.setLowerBound( 10, -large );
+        tableau.setUpperBound( 10, large );
+        tableau.setLowerBound( 11, -large );
+        tableau.setUpperBound( 11, large );
+        tableau.setLowerBound( 12, -large );
+        tableau.setUpperBound( 12, large );
+        tableau.setLowerBound( 13, -large );
+        tableau.setUpperBound( 13, large );
+
+        nlr.setTableau( &tableau );
+
+        // Initialize
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+
+        // Perform the tightening pass
+        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ),
+            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
+
+            Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 8, Tightening::UB ),
+
+            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
+            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
+
+            Tightening( 8, -2, Tightening::LB ), Tightening( 8, 11, Tightening::UB ),
+            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 11, Tightening::UB ),
+
+            Tightening( 10, -3, Tightening::LB ), Tightening( 10, 10, Tightening::UB ),
+            Tightening( 11, 0, Tightening::LB ), Tightening( 11, 10, Tightening::UB ),
+
+            Tightening( 12, 0, Tightening::LB ), Tightening( 12, 11, Tightening::UB ),
+            Tightening( 13, 0, Tightening::LB ), Tightening( 13, 41, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
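+        // Spot check: with x0, x1 in [-1, 2], x4 = 2 * x0 - 3 * x1 spans [-8, 7],
+        // so its absolute value x5 spans [0, 8], matching the tightenings above.
+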
+        // Change the current bounds
+        tableau.setLowerBound( 0, -3 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 2 );
+
+        tableau.setLowerBound( 2, -large );
+        tableau.setUpperBound( 2, large );
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+        tableau.setLowerBound( 7, -large );
+        tableau.setUpperBound( 7, large );
+        tableau.setLowerBound( 8, -large );
+        tableau.setUpperBound( 8, large );
+        tableau.setLowerBound( 9, -large );
+        tableau.setUpperBound( 9, large );
tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_sign_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithSign( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 1, Tightening::LB ), Tightening( 3, 1, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 
1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 3, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + + Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, 3 ); + tableau.setUpperBound( 0, 4 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, 4, Tightening::LB ), Tightening( 2, 5, Tightening::UB ), + Tightening( 3, 1, Tightening::LB ), Tightening( 3, 1, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 11, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, 1, Tightening::LB ), Tightening( 10, 3, Tightening::UB ), + Tightening( 11, 1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + + Tightening( 12, 1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, 4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_leaky_relu_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithLeakyRelu( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 
4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), + Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), + Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ), + Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), + + Tightening( 9, -0.28, Tightening::LB ), Tightening( 9, 10.1, Tightening::UB ), + Tightening( 11, -0.38, Tightening::LB ), Tightening( 11, 9.1, Tightening::UB ), + + Tightening( 12, -0.28, Tightening::LB ), Tightening( 12, 10.1, Tightening::UB ), + Tightening( 13, -1.42, Tightening::LB ), Tightening( 13, 37.4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), 
Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, -0.2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 7.1, Tightening::UB ), + Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), + + Tightening( 9, -0.34, Tightening::LB ), Tightening( 9, 7.1, Tightening::UB ), + Tightening( 11, -0.32, Tightening::LB ), Tightening( 11, 7.3, Tightening::UB ), + + Tightening( 12, -0.34, Tightening::LB ), Tightening( 12, 7.1, Tightening::UB ), + Tightening( 13, -1.3, Tightening::LB ), Tightening( 13, 29, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_round_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithRound( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, 1.4 ); + tableau.setUpperBound( 0, 1.6 ); + tableau.setLowerBound( 1, -1.4 ); + tableau.setUpperBound( 1, 2.1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), + Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), + Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + + Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), + Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), + + Tightening( 9, -4, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), + Tightening( 11, -7, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), + Tightening( 13, -25, Tightening::LB ), Tightening( 13, 35, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the 
current bounds + tableau.setLowerBound( 0, -3.1 ); + tableau.setUpperBound( 0, 1.6 ); + tableau.setLowerBound( 1, 1.4 ); + tableau.setUpperBound( 1, 2.1 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), + Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), + Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), + Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -16, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + + Tightening( 9, -16, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 11, -15, Tightening::LB ), Tightening( 11, 2, Tightening::UB ), + + Tightening( 12, -16, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -61, Tightening::LB ), Tightening( 13, 7, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_sigmoid_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithSigmoids( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + 
tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 0.5000, Tightening::LB ), Tightening( 3, 0.8808, Tightening::UB ), + Tightening( 5, 0.0067, Tightening::LB ), Tightening( 5, 0.9933, Tightening::UB ), + Tightening( 7, 0.2689, Tightening::LB ), Tightening( 7, 0.7311, Tightening::UB ), + + Tightening( 8, -0.2244, Tightening::LB ), Tightening( 8, 1.6052, Tightening::UB ), + Tightening( 10, 0.3948, Tightening::LB ), Tightening( 10, 2.2244, Tightening::UB ), + + Tightening( 9, 0.4441, Tightening::LB ), Tightening( 9, 0.8327, Tightening::UB ), + Tightening( 11, 0.5974, Tightening::LB ), Tightening( 11, 0.9024, Tightening::UB ), + + Tightening( 12, 0.4441, Tightening::LB ), Tightening( 12, 0.8327, Tightening::UB ), + Tightening( 13, 2.2364, Tightening::LB ), Tightening( 13, 3.5399, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.1192, Tightening::LB ), Tightening( 3, 0.8808, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9933, Tightening::UB ), + Tightening( 7, 0.2689, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), + + Tightening( 8, -0.7616, Tightening::LB ), Tightening( 8, 1.6052, Tightening::UB ), + Tightening( 10, 0.2384, Tightening::LB ), Tightening( 10, 2.6052, Tightening::UB ), + + Tightening( 9, 0.3183, Tightening::LB ), Tightening( 9, 0.8327, Tightening::UB ), + Tightening( 11, 0.5593, 
Tightening::LB ), Tightening( 11, 0.9312, Tightening::UB ), + + Tightening( 12, 0.3183, Tightening::LB ), Tightening( 12, 0.8327, Tightening::UB ), + Tightening( 13, 1.9963, Tightening::LB ), Tightening( 13, 3.6263, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_max_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithMax( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 12 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 10, Tightening::UB ), + Tightening( 9, -8, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), + + Tightening( 11, -10, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 
11, -large ); + tableau.setUpperBound( 11, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -8, Tightening::LB ), Tightening( 3, 9, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 9, Tightening::UB ), + Tightening( 7, -5, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 16, Tightening::UB ), + Tightening( 9, -14, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -5, Tightening::LB ), Tightening( 10, 16, Tightening::UB ), + + Tightening( 11, -16, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_softmax_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithSoftmax( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 0.0066, Tightening::LB ), Tightening( 3, 0.9517, Tightening::UB ), + Tightening( 5, 0.0007, Tightening::LB ), Tightening( 5, 0.9909, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ), + + Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ), + Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ), + + Tightening( 9, 0.0240, Tightening::LB ), Tightening( 9, 0.8349, Tightening::UB ), + Tightening( 11, 0.1651, Tightening::LB ), Tightening( 11, 0.9759, Tightening::UB ), + + 
Tightening( 12, 0.0240, Tightening::LB ), Tightening( 12, 0.8349, Tightening::UB ), + Tightening( 13, 0.5192, Tightening::LB ), Tightening( 13, 3.7629, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + + Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 10, 0.0654, Tightening::LB ), Tightening( 10, 2.9933, Tightening::UB ), + + Tightening( 9, 0.0184, Tightening::LB ), Tightening( 9, 0.8678, Tightening::UB ), + Tightening( 11, 0.1322, Tightening::LB ), Tightening( 11, 0.9816, Tightening::UB ), + + Tightening( 12, 0.0184, Tightening::LB ), Tightening( 12, 0.8678, Tightening::UB ), + Tightening( 13, 0.4151, Tightening::LB ), Tightening( 13, 3.8125, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_bilinear_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithBilinear( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 12 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -0.1 ); + tableau.setUpperBound( 0, 0.1 ); + tableau.setLowerBound( 1, -0.1 ); + tableau.setUpperBound( 1, 0.1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + 
tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0.9, Tightening::LB ), Tightening( 2, 1.1, Tightening::UB ), + Tightening( 3, -0.5, Tightening::LB ), Tightening( 3, 0.5, Tightening::UB ), + Tightening( 4, -0.3, Tightening::LB ), Tightening( 4, 0.3, Tightening::UB ), + Tightening( 5, -0.3, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ), + + Tightening( 6, -0.55, Tightening::LB ), Tightening( 6, 0.55, Tightening::UB ), + Tightening( 7, -0.09, Tightening::LB ), Tightening( 7, 0.09, Tightening::UB ), + + Tightening( 8, 1.36, Tightening::LB ), Tightening( 8, 2.64, Tightening::UB ), + Tightening( 9, -0.64, Tightening::LB ), Tightening( 9, 0.64, Tightening::UB ), + + Tightening( 10, -1.6896, Tightening::LB ), Tightening( 10, 1.6896, Tightening::UB ), + + Tightening( 11, -1.6896, Tightening::LB ), Tightening( 11, 1.6896, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -0.3 ); + tableau.setUpperBound( 0, 0.1 ); + tableau.setLowerBound( 1, -0.1 ); + tableau.setUpperBound( 1, 0.2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, 0.7, Tightening::LB ), Tightening( 2, 1.1, Tightening::UB ), + Tightening( 3, -0.8, Tightening::LB ), Tightening( 3, 0.9, Tightening::UB ), + Tightening( 4, -0.5, Tightening::LB ), Tightening( 4, 0.5, Tightening::UB ), + Tightening( 5, -0.6, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ), + + Tightening( 6, -0.88, Tightening::LB ), Tightening( 6, 0.99, Tightening::UB ), + Tightening( 7, -0.3, Tightening::LB ), Tightening( 7, 0.3, Tightening::UB ), + + Tightening( 8, 0.82, Tightening::LB ), Tightening( 8, 3.29, Tightening::UB ), + Tightening( 9, -1.29, Tightening::LB ), Tightening( 9, 1.18, Tightening::UB ), + + Tightening( 10, -4.2441, Tightening::LB ), Tightening( 10, 3.8822, Tightening::UB ), + + Tightening( 11, -3.8822, Tightening::LB ), Tightening( 11, 4.2441, Tightening::UB ), + } ); 
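+        // Sanity check on the bilinear bounds above (assuming, as the numbers
+        // suggest, that x10 = x8 * x9 and x11 = -x10): interval multiplication
+        // takes the min/max over the four corner products, e.g.
+        // 3.29 * ( -1.29 ) = -4.2441 and 3.29 * 1.18 = 3.8822.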
+ + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_abs_and_relu_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithAbsAndRelu( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -5, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, 
-large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), + + Tightening( 10, -10, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_round_and_sign_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithRoundAndSign( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, 1.4 ); + tableau.setUpperBound( 0, 1.6 ); + tableau.setLowerBound( 1, -1.4 ); + tableau.setUpperBound( 1, 2.1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), + Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), + Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + + Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), 
Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), + Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), + + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + + Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3.1 ); + tableau.setUpperBound( 0, 1.6 ); + tableau.setLowerBound( 1, 1.4 ); + tableau.setUpperBound( 1, 2.1 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), + Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), + Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), + Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -16, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + + Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_leaky_relu_and_sigmoid_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithLeakyReluAndSigmoid( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, 
-large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), + Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), + Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ), + Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), + + Tightening( 9, 0.0573, Tightening::LB ), Tightening( 9, 0.9999, Tightening::UB ), + Tightening( 11, 0.0219, Tightening::LB ), Tightening( 11, 0.9999, Tightening::UB ), + + Tightening( 12, 0.0573, Tightening::LB ), Tightening( 12, 0.9999, Tightening::UB ), + Tightening( 13, 0.1229, Tightening::LB ), Tightening( 13, 3.9996, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, 
Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, -0.2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 7.1, Tightening::UB ), + Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), + + Tightening( 9, 0.0323, Tightening::LB ), Tightening( 9, 0.9992, Tightening::UB ), + Tightening( 11, 0.0392, Tightening::LB ), Tightening( 11, 0.9993, Tightening::UB ), + + Tightening( 12, 0.0323, Tightening::LB ), Tightening( 12, 0.9992, Tightening::UB ), + Tightening( 13, 0.1498, Tightening::LB ), Tightening( 13, 3.9972, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_softmax_and_max_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithSoftmaxAndMax( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 12 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { Tightening( 2, 0, Tightening::LB ), + Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), + Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), + Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 0.0066, Tightening::LB ), + Tightening( 3, 0.9517, Tightening::UB ), + Tightening( 5, 0.0007, Tightening::LB ), + Tightening( 5, 0.9909, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), + Tightening( 7, 0.7297, Tightening::UB ), + + Tightening( 8, -0.7225, Tightening::LB ), + Tightening( 8, 1.9403, Tightening::UB ), + Tightening( 9, 0.3192, Tightening::LB ), + Tightening( 9, 2.9819, Tightening::UB ), + + Tightening( 10, 0.3192, Tightening::LB ), + Tightening( 10, 2.9819, Tightening::UB ), + + Tightening( 11, -2.9819, Tightening::LB ), + Tightening( 11, -0.3192, Tightening::UB ) } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + 
tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { Tightening( 2, -2, Tightening::LB ), + Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), + Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), + Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.0009, Tightening::LB ), + Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), + Tightening( 7, 0.9820, Tightening::UB ), + + Tightening( 8, -0.9811, Tightening::LB ), + Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 9, 0.0654, Tightening::LB ), + Tightening( 9, 2.9933, Tightening::UB ), + + Tightening( 10, 0.0654, Tightening::LB ), + Tightening( 10, 2.9933, Tightening::UB ), + + Tightening( 11, -2.9933, Tightening::LB ), + Tightening( 11, -0.0654, Tightening::UB ) } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_relu_and_bilinear_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithReluAndBilinear( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 12 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + 
+            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
+
+            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
+            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
+
+            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ),
+            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 7, Tightening::UB ),
+
+            Tightening( 10, -7, Tightening::LB ), Tightening( 10, 49, Tightening::UB ),
+
+            Tightening( 11, -49, Tightening::LB ), Tightening( 11, 7, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+        // Change the current bounds
+        tableau.setLowerBound( 0, -3 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 2 );
+
+        tableau.setLowerBound( 2, -large );
+        tableau.setUpperBound( 2, large );
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+        tableau.setLowerBound( 7, -large );
+        tableau.setUpperBound( 7, large );
+        tableau.setLowerBound( 8, -large );
+        tableau.setUpperBound( 8, large );
+        tableau.setLowerBound( 9, -large );
+        tableau.setUpperBound( 9, large );
+        tableau.setLowerBound( 10, -large );
+        tableau.setUpperBound( 10, large );
+        tableau.setLowerBound( 11, -large );
+        tableau.setUpperBound( 11, large );
+
+        // Initialize
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+
+        // Perform the tightening pass
+        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
+
+        List<Tightening> expectedBounds2( {
+            Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
+            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
+
+            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
+            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
+
+            Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ),
+            Tightening( 9, -2, Tightening::LB ), Tightening( 9, 7, Tightening::UB ),
+
+            Tightening( 10, -14, Tightening::LB ), Tightening( 10, 49, Tightening::UB ),
+
+            Tightening( 11, -49, Tightening::LB ), Tightening( 11, 14, Tightening::UB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+    }
+
+    void test_sbt_relus_all_active()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTRelu( nlr, tableau );
+
+        tableau.setLowerBound( 0, 4 );
+        tableau.setUpperBound( 0, 6 );
+        tableau.setLowerBound( 1, 1 );
+        tableau.setUpperBound( 1, 5 );
+
+        // Invoke SBT
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
+
+        /*
+          Input ranges:
+
+          x0: [4, 6]
+          x1: [1, 5]
+
+          Layer 1:
+
+          x2.lb = 2x0 + 3x1 : [11, 27]
+          x2.ub = 2x0 + 3x1 : [11, 27]
+
+          x3.lb = x0 + x1 : [5, 11]
+          x3.ub = x0 + x1 : [5, 11]
+
+          Both ReLUs active, bounds survive through activations:
+
+          x4.lb = 2x0 + 3x1 : [11, 27]
+          x4.ub = 2x0 + 3x1 : [11, 27]
+
+          x5.lb = x0 + x1 : [5, 11]
+          x5.ub = x0 + x1 : [5, 11]
+
+          Layer 2:
+
+          x6.lb = x0 + 2x1 : [6, 16]
+          x6.ub = x0 + 2x1 : [6, 16]
+        */
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, 11, Tightening::LB ),
+            Tightening( 2, 27, Tightening::UB ),
+            Tightening( 3, 5, Tightening::LB ),
+            Tightening( 3, 11, Tightening::UB ),
+
+            Tightening( 4, 11, Tightening::LB ),
+            Tightening( 4, 27, Tightening::UB ),
+            Tightening( 5, 5, Tightening::LB ),
+            Tightening( 5, 11, Tightening::UB ),
+
+            Tightening( 6, 6, Tightening::LB ),
+            Tightening( 6, 16, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+    }
+
+    void test_sbt_relus_active_and_inactive()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTRelu( nlr, tableau );
+
+        tableau.setLowerBound( 0, 4 );
+        tableau.setUpperBound( 0, 6 );
+        tableau.setLowerBound( 1, 1 );
+        tableau.setUpperBound( 1, 5 );
+
+        // Strong negative bias for x2, which is node (1,0)
+        nlr.setBias( 1, 0, -30 );
+
+        // Invoke SBT
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
+
+        /*
+          Input ranges:
+
+          x0: [4, 6]
+          x1: [1, 5]
+
+          Layer 1:
+
+          x2.lb = 2x0 + 3x1 - 30 : [-19, -3]
+          x2.ub = 2x0 + 3x1 - 30 : [-19, -3]
+
+          x3.lb = x0 + x1 : [5, 11]
+          x3.ub = x0 + x1 : [5, 11]
+
+          First ReLU is inactive, bounds get zeroed
+          Second ReLU is active, bounds survive the activation
+
+          x4.lb = 0
+          x4.ub = 0
+
+          x5.lb = x0 + x1 : [5, 11]
+          x5.ub = x0 + x1 : [5, 11]
+
+          Layer 2:
+
+          x6.lb = - x0 - x1 : [-11, -5]
+          x6.ub = - x0 - x1 : [-11, -5]
+        */
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, -19, Tightening::LB ),
+            Tightening( 2, -3, Tightening::UB ),
+            Tightening( 3, 5, Tightening::LB ),
+            Tightening( 3, 11, Tightening::UB ),
+
+            Tightening( 4, 0, Tightening::LB ),
+            Tightening( 4, 0, Tightening::UB ),
+            Tightening( 5, 5, Tightening::LB ),
+            Tightening( 5, 11, Tightening::UB ),
+
+            Tightening( 6, -11, Tightening::LB ),
+            Tightening( 6, -5, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+    }
+
+    void test_sbt_relus_active_and_not_fixed()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTRelu( nlr, tableau );
+
+        tableau.setLowerBound( 0, 4 );
+        tableau.setUpperBound( 0, 6 );
+        tableau.setLowerBound( 1, 1 );
+        tableau.setUpperBound( 1, 5 );
+
+        // Strong negative bias for x2, which is node (1,0)
+        nlr.setBias( 1, 0, -15 );
+
+        // Invoke SBT
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
+
+        /*
+          Input ranges:
+
+          x0: [4, 6]
+          x1: [1, 5]
+
+          Layer 1:
+
+          x2.lb = 2x0 + 3x1 - 15 : [-4, 12]
+          x2.ub = 2x0 + 3x1 - 15 : [-4, 12]
+
+          x3.lb = x0 + x1 : [5, 11]
+          x3.ub = x0 + x1 : [5, 11]
+
+          First ReLU is undecided, bounds are concretized.
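+          (For this undecided ReLU the relaxation scales both symbolic bounds by
+          u / ( u - l ) and shifts the upper bound by -u * l / ( u - l ); with
+          [l, u] = [-4, 12] that gives 0.75 and 3, matching the lines below.
+          Other tests in this suite show cases where the lower bound is zeroed
+          instead.)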
+          Coefficient: 12/(12--4) = 12/16 = 0.75
+          Second ReLU is active, bounds survive the activation
+
+          x4 range: [0, 12]
+          x4.lb = 0.75( 2x0 + 3x1 ) - 0.75 * 15 = 1.5x0 + 2.25x1 - 11.25
+          x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25
+
+          x5.lb = x0 + x1 : [5, 11]
+          x5.ub = x0 + x1 : [5, 11]
+
+          Layer 2:
+
+          x6.lb = 0.5x0 + 1.25x1 - 11.25
+          x6.ub = 0.5x0 + 1.25x1 - 8.25
+
+          x6 range: [2 + 1.25 - 11.25 = -8, 3 + 6.25 - 8.25 = 1] = [-8, 1]
+        */
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, -4, Tightening::LB ),
+            Tightening( 2, 12, Tightening::UB ),
+            Tightening( 3, 5, Tightening::LB ),
+            Tightening( 3, 11, Tightening::UB ),
+
+            Tightening( 4, 0, Tightening::LB ),
+            Tightening( 4, 12, Tightening::UB ),
+            Tightening( 5, 5, Tightening::LB ),
+            Tightening( 5, 11, Tightening::UB ),
+
+            Tightening( 6, -8, Tightening::LB ),
+            Tightening( 6, 1, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+    }
+
+    void test_sbt_relus_active_and_externally_fixed()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTRelu( nlr, tableau );
+
+        tableau.setLowerBound( 0, 4 );
+        tableau.setUpperBound( 0, 6 );
+        tableau.setLowerBound( 1, 1 );
+        tableau.setUpperBound( 1, 5 );
+
+        // Strong negative bias for x2, which is node (1,0). Should make the node unfixed.
+        nlr.setBias( 1, 0, -15 );
+
+        // However, one of the ReLU's variables has been eliminated
+        nlr.eliminateVariable( 2, -3 );
+
+        // Invoke SBT
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
+
+        /*
+          Input ranges:
+
+          x0: [4, 6]
+          x1: [1, 5]
+
+          Layer 1:
+
+          x2.lb = 2x0 + 3x1 - 15 : [-4, 12]
+          x2.ub = 2x0 + 3x1 - 15 : [-4, 12]
+
+          x3.lb = x0 + x1 : [5, 11]
+          x3.ub = x0 + x1 : [5, 11]
+
+          First ReLU is inactive (set externally), bounds get zeroed
+          Second ReLU is active, bounds survive the activation
+
+          x4.lb = 0
+          x4.ub = 0
+
+          x5.lb = x0 + x1 : [5, 11]
+          x5.ub = x0 + x1 : [5, 11]
+
+          Layer 2:
+
+          x6.lb = - x0 - x1 : [-11, -5]
+          x6.ub = - x0 - x1 : [-11, -5]
+        */
+
+        List<Tightening> expectedBounds( {
+            // x2 does not appear, because it has been eliminated
+
+            Tightening( 3, 5, Tightening::LB ),
+            Tightening( 3, 11, Tightening::UB ),
+
+            Tightening( 4, 0, Tightening::LB ),
+            Tightening( 4, 0, Tightening::UB ),
+            Tightening( 5, 5, Tightening::LB ),
+            Tightening( 5, 11, Tightening::UB ),
+
+            Tightening( 6, -11, Tightening::LB ),
+            Tightening( 6, -5, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+    }
+
+    void test_sbt_relu_residual1()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTReluResidual1( nlr, tableau );
+
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 1 );
+
+        // Invoke SBT
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
+
+        /*
+          Input ranges:
+
+          x0: [-1, 1]
+
+          Layer 1:
+
+          x1.lb = x0 : [-1, 1]
+          x1.ub = x0 : [-1, 1]
+
+          ReLU is undecided, bounds are concretized.
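+          (The source range is the symmetric [-1, 1], so the coefficient below is
+          0.5 and the upper bound is shifted by -u * l / ( u - l ) = 0.5.)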
+          Coefficient: 1/( 1--1 ) = 1/2 = 0.5
+
+          x2.lb = 0.5x0
+          x2.ub = 0.5x0 + 0.5
+          x2 range: [0, 1]
+
+          Layer 2 (with residual from x0):
+
+          x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1]
+          x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5]
+          x3 range: [-1, 2.5]
+
+          ReLU is undecided, bounds are concretized.
+          Coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7.
+
+          x4.lb = 0
+          x4.ub = 5/7 ( -1.5x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5]
+          x4 range: [0, 2.5]
+
+          Layer 3 (with residual from x1):
+
+          x5.lb = 3 ( 0 ) + 3 ( x0 ) + 1 = 3x0 + 1 : [-2, 4]
+          x5.ub = 3 ( -15/14 x0 + 20/14 ) + 3 ( x0 ) + 1 = -3/14 x0 + 74/14 : [71/14, 77/14 = 5.5]
+
+          x5 range: [-2, 5.5]
+        */
+
+        List<Tightening> expectedBounds( {
+            Tightening( 1, -1, Tightening::LB ),
+            Tightening( 1, 1, Tightening::UB ),
+            Tightening( 2, 0, Tightening::LB ),
+            Tightening( 2, 1, Tightening::UB ),
+            Tightening( 3, -1, Tightening::LB ),
+            Tightening( 3, 2.5, Tightening::UB ),
+            Tightening( 4, 0, Tightening::LB ),
+            Tightening( 4, 2.5, Tightening::UB ),
+            Tightening( 5, -2, Tightening::LB ),
+            Tightening( 5, 5.5, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+    }
+
+    void test_sbt_relu_residual2()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTReluResidual2( nlr, tableau );
+
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 1 );
+
+        // Invoke SBT
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
+
+        /*
+          Input ranges:
+
+          x0: [-1, 1]
+
+          Layer 1:
+
+          x1.lb = x0 : [-1, 1]
+          x1.ub = x0 : [-1, 1]
+
+          ReLU is undecided, bounds are concretized.
+          Coefficient: 1/( 1--1 ) = 1/2 = 0.5
+
+          x2.lb = 0.5x0
+          x2.ub = 0.5x0 + 0.5
+          x2 range: [0, 1]
+
+          Layer 2 (with residual from x0):
+
+          x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1]
+          x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5]
+          x3 range: [-1, 2.5]
+
+          ReLU is undecided, bounds are concretized.
+          Coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7.
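+          (The constant 5/7 added to x4.ub below is -u * l / ( u - l ) =
+          -2.5 * ( -1 ) / 3.5; it coincides with the coefficient only because
+          l = -1 here.)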
+ + x4.lb = 0 + x4.ub = 5/7 ( -1.5x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5] + x4 range: [0, 2.5] + + Layer 3 (with residual from x0): + + x5.lb = 3 ( 0 ) + 1 ( x0 ) + 1 = 1x0 + 1 : [0, 2] + x5.ub = 3 ( -15/14 x0 + 20/14 ) + 1 ( x0 ) + 1 = -31/14 x0 + 74/14 : [43/14, 105/14 + = 7.5] x5 range: [0, 7.5] + + Layer 4: + x6.lb = 1x0 + 1 : [0, 2] + x6.ub = -31/14 x0 + 74/14 : [43/14, 105/14 = 7.5] + x6 range: [0, 7.5] + */ + + List expectedBounds( { + Tightening( 1, -1, Tightening::LB ), + Tightening( 1, 1, Tightening::UB ), + Tightening( 2, 0, Tightening::LB ), + Tightening( 2, 1, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 2.5, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 2.5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 7.5, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), + Tightening( 6, 7.5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_relu_reindex() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTReluReindex( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [-1, 1] + x1: [-1, 1] + + Layer 1: + + x2.lb = x0 + x1 : [-2, 2] + x2.ub = x0 + x1 : [-2, 2] + + x3.lb = x0 - x1 : [-2, 2] + x3.ub = x0 - x1 : [-2, 2] + + Both ReLUs are undecided, bounds are concretized. + Coefficient: 2/( 2--2 ) = 2/4 = 0.5 + + x4.lb = 0.5 ( x0 + x1 ) = 0.5x0 + 0.5x1 + x4.ub = 0.5 ( x0 + x1 ) + 1 = 0.5x0 + 0.5x1 + 1 + x4 range: [0, 2] + + x5.lb = 0.5 ( x0 - x1 ) = 0.5x0 - 0.5x1 + x5.ub = 0.5 ( x0 - x1 ) + 1 = 0.5x0 - 0.5x1 + 1 + x5 range: [0, 2] + + Layer 2: + + x6.lb = 1 ( 0.5x0 + 0.5x1 ) + 1 ( 0.5x0 - 0.5x1 ) = x0 : [-1, 1] + x6.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) + 1 ( 0.5x0 - 0.5x1 + 1 ) = x0 + 2 : [1, 3] + x6 range: [-1, 3] + + x7.lb = 1 ( 0.5x0 + 0.5x1 ) - 1 ( 0.5x0 - 0.5x1 + 1 ) = x1 - 1 : [-2, 0] + x7.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) - 1 ( 0.5x0 - 0.5x1 ) = x1 + 1 : [0, 2] + x7 range: [-2, 2] + + Both ReLUs are undecided, bounds are concretized. 
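+          (Each of the four symbolic bounds is relaxed with its own coefficient,
+          chosen from the concrete range of its source neuron; the four cases are
+          listed below.)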
+ Coefficient (first ReLU, lower): 1/( 1--1 ) = 1/2 = 0.5 + Coefficient (first ReLU, upper): 1 (propagated as is) + Coefficient (second ReLU, lower): 0 (bound is zero) + Coefficient (second ReLU, upper): 2/( 2--2 ) = 2/4 = 0.5 + + x8.lb = 0.5 ( x0 ) = 0.5x0 + x8.ub = x0 + 2 + x8 range: [0, 3] + + x9.lb = 0 + x9.ub = 0.5 ( x1 + 1 ) + 1 = 0.5x1 + 1.5 + x9 range: [0, 2] + + Layer 3: + + x10.lb = 1 ( 0.5x0 ) + 1 ( 0 ) + 1 = 0.5x0 + 1 : [0.5, 1.5] + x10.ub = 1 ( x0 + 2 ) + 1 ( 0.5x1 + 1.5 ) + 1 = x0 + 0.5x1 + 4.5 : [3, 6] + x10 range: [0.5, 6] + + x11.lb = 0.5x1 - 0.5 + x11.ub = 0.5x1 + 1.5 + x11 range: [0, 2] + + */ + + List expectedBounds( + { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, 0.5, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 2, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_abs_all_positive() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); 
+        TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
+
+        /*
+          Input ranges:
+
+          x0: [4, 6]
+          x1: [1, 5]
+
+          Layer 1:
+
+          x2.lb = 2x0 + 3x1 : [11, 27]
+          x2.ub = 2x0 + 3x1 : [11, 27]
+
+          x3.lb = x0 + x1 : [5, 11]
+          x3.ub = x0 + x1 : [5, 11]
+
+          Both absolute values positive, bounds survive through activations:
+
+          x4.lb = 2x0 + 3x1 : [11, 27]
+          x4.ub = 2x0 + 3x1 : [11, 27]
+
+          x5.lb = x0 + x1 : [5, 11]
+          x5.ub = x0 + x1 : [5, 11]
+
+          Layer 2:
+
+          x6.lb = x0 + 2x1 : [6, 16]
+          x6.ub = x0 + 2x1 : [6, 16]
+        */
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, 11, Tightening::LB ),
+            Tightening( 2, 27, Tightening::UB ),
+            Tightening( 3, 5, Tightening::LB ),
+            Tightening( 3, 11, Tightening::UB ),
+
+            Tightening( 4, 11, Tightening::LB ),
+            Tightening( 4, 27, Tightening::UB ),
+            Tightening( 5, 5, Tightening::LB ),
+            Tightening( 5, 11, Tightening::UB ),
+
+            Tightening( 6, 6, Tightening::LB ),
+            Tightening( 6, 16, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+    }
+
+    void test_sbt_abs_positive_and_negative()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        tableau.getBoundManager().initialize( 7 );
+        nlr.setTableau( &tableau );
+
+        // Create the layers
+        nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
+        nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 );
+        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );
+
+        // Mark layer dependencies
+        for ( unsigned i = 1; i <= 3; ++i )
+            nlr.addLayerDependency( i - 1, i );
+
+        // Weights
+        nlr.setWeight( 0, 0, 1, 0, 2 );
+        nlr.setWeight( 0, 0, 1, 1, 1 );
+        nlr.setWeight( 0, 1, 1, 0, 3 );
+        nlr.setWeight( 0, 1, 1, 1, 1 );
+        nlr.setWeight( 2, 0, 3, 0, 1 );
+        nlr.setWeight( 2, 1, 3, 0, -1 );
+
+        // Mark the Abs sources
+        nlr.addActivationSource( 1, 0, 2, 0 );
+        nlr.addActivationSource( 1, 1, 2, 1 );
+
+        // Variable indexing
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
+
+        // Very loose bounds for neurons except inputs
+        double large = 1000000;
+
+        tableau.setLowerBound( 2, -large );
+        tableau.setUpperBound( 2, large );
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+
+        tableau.setLowerBound( 0, 4 );
+        tableau.setUpperBound( 0, 6 );
+        tableau.setLowerBound( 1, 1 );
+        tableau.setUpperBound( 1, 5 );
+
+        // Strong negative bias for x2, which is node (1,0)
+        nlr.setBias( 1, 0, -30 );
+
+        // Invoke SBT
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
+
+        /*
+          Input ranges:
+
+          x0: [4, 6]
+          x1: [1, 5]
+
+          Layer 1:
+
+          x2.lb = 2x0 + 3x1 - 30 : [-19, -3]
+          x2.ub = 2x0 + 3x1 - 30 : [-19, -3]
+
+          x3.lb = x0 + x1 : [5, 11]
+          x3.ub = x0 + x1 : [5, 11]
+
+          First absolute value is negative, bounds get flipped
+          Second absolute value is positive, bounds survive the activation
activation + + x4.lb = -2x0 -3x1 + 30 : [3, 19] + x4.ub = -2x0 -3x1 + 30 : [3, 19] + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - 3x0 - 4x1 + 30 : [-8, 14] + x6.ub = - 3x0 - 4x1 + 30 : [-8, 14] + */ + + List expectedBounds( { + Tightening( 2, -19, Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 19, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, 14, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_absolute_values_positive_and_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First absolute value is undecided, bounds are concretized. 
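+ (Added explanation: for an undecided Abs with input range [lb, ub],
+ lb < 0 < ub, the bounds are concretized to [0, max( -lb, ub )]; here
+ that is [0, max( 4, 12 )] = [0, 12].)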
+ Second absolute value is active, bounds survive the activation
+
+ x4 range: [0, 12]
+ x4.lb = 0
+ x4.ub = 12
+
+ x5.lb = x0 + x1 : [5, 11]
+ x5.ub = x0 + x1 : [5, 11]
+
+ Layer 2:
+
+ x6.lb = - x0 - x1 : [-11, -5]
+ x6.ub = - x0 - x1 + 12 : [ 1, 7]
+
+ x6 range: [-11, 7]
+ */
+
+ List<Tightening> expectedBounds( {
+ Tightening( 2, -4, Tightening::LB ),
+ Tightening( 2, 12, Tightening::UB ),
+ Tightening( 3, 5, Tightening::LB ),
+ Tightening( 3, 11, Tightening::UB ),
+
+ Tightening( 4, 0, Tightening::LB ),
+ Tightening( 4, 12, Tightening::UB ),
+ Tightening( 5, 5, Tightening::LB ),
+ Tightening( 5, 11, Tightening::UB ),
+
+ Tightening( 6, -11, Tightening::LB ),
+ Tightening( 6, 7, Tightening::UB ),
+ } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+ }
+
+ void test_sbt_absolute_values_active_and_externally_fixed()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ tableau.getBoundManager().initialize( 7 );
+ nlr.setTableau( &tableau );
+
+ // Create the layers
+ nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+ nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
+ nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 );
+ nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );
+
+ // Mark layer dependencies
+ for ( unsigned i = 1; i <= 3; ++i )
+ nlr.addLayerDependency( i - 1, i );
+
+ // Weights
+ nlr.setWeight( 0, 0, 1, 0, 2 );
+ nlr.setWeight( 0, 0, 1, 1, 1 );
+ nlr.setWeight( 0, 1, 1, 0, 3 );
+ nlr.setWeight( 0, 1, 1, 1, 1 );
+ nlr.setWeight( 2, 0, 3, 0, 1 );
+ nlr.setWeight( 2, 1, 3, 0, -1 );
+
+ // Mark the Abs sources
+ nlr.addActivationSource( 1, 0, 2, 0 );
+ nlr.addActivationSource( 1, 1, 2, 1 );
+
+ // Variable indexing
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
+
+ // Very loose bounds for neurons except inputs
+ double large = 1000000;
+
+ tableau.setLowerBound( 2, -large );
+ tableau.setUpperBound( 2, large );
+ tableau.setLowerBound( 3, -large );
+ tableau.setUpperBound( 3, large );
+ tableau.setLowerBound( 4, -large );
+ tableau.setUpperBound( 4, large );
+ tableau.setLowerBound( 5, -large );
+ tableau.setUpperBound( 5, large );
+ tableau.setLowerBound( 6, -large );
+ tableau.setUpperBound( 6, large );
+
+ tableau.setLowerBound( 0, 4 );
+ tableau.setUpperBound( 0, 6 );
+ tableau.setLowerBound( 1, 1 );
+ tableau.setUpperBound( 1, 5 );
+
+ // Strong negative bias for x2, which is node (1,0). Should make the node unfixed.
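+ // (Added note: as in the previous test, the bias of -15 would put
+ // x2 = 2x0 + 3x1 - 15 in [-4, 12] over the input box, leaving the Abs
+ // phase undecided on its own; the variable elimination below overrides
+ // this and fixes x2 to the constant -3.)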
+ nlr.setBias( 1, 0, -15 );
+
+ // However, the weighted sum variable has been eliminated
+ nlr.eliminateVariable( 2, -3 );
+
+ // Invoke SBT
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
+
+ /*
+ Input ranges:
+
+ x0: [4, 6]
+ x1: [1, 5]
+
+ Layer 1:
+
+ x2 is eliminated, everything set to -3
+
+ x3.lb = x0 + x1 : [5, 11]
+ x3.ub = x0 + x1 : [5, 11]
+
+ Second absolute value is positive, bounds survive the activation
+
+ x4: all set to 3
+
+ x5.lb = x0 + x1 : [5, 11]
+ x5.ub = x0 + x1 : [5, 11]
+
+ Layer 2:
+
+ x6.lb = - x0 - x1 + 3 : [-8, -2]
+ x6.ub = - x0 - x1 + 3 : [-8, -2]
+ */
+
+ List<Tightening> expectedBounds( {
+ // x2 does not appear, because it has been eliminated
+
+ Tightening( 3, 5, Tightening::LB ),
+ Tightening( 3, 11, Tightening::UB ),
+
+ Tightening( 4, 3, Tightening::LB ),
+ Tightening( 4, 3, Tightening::UB ),
+ Tightening( 5, 5, Tightening::LB ),
+ Tightening( 5, 11, Tightening::UB ),
+
+ Tightening( 6, -8, Tightening::LB ),
+ Tightening( 6, -2, Tightening::UB ),
+ } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+ }
+
+ void test_sbt_signs_positive_and_not_fixed()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ tableau.getBoundManager().initialize( 7 );
+ nlr.setTableau( &tableau );
+
+ // Create the layers
+ nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+ nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
+ nlr.addLayer( 2, NLR::Layer::SIGN, 2 );
+ nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );
+
+ // Mark layer dependencies
+ for ( unsigned i = 1; i <= 3; ++i )
+ nlr.addLayerDependency( i - 1, i );
+
+ // Weights
+ nlr.setWeight( 0, 0, 1, 0, 2 );
+ nlr.setWeight( 0, 0, 1, 1, 1 );
+ nlr.setWeight( 0, 1, 1, 0, 3 );
+ nlr.setWeight( 0, 1, 1, 1, 1 );
+ nlr.setWeight( 2, 0, 3, 0, 1 );
+ nlr.setWeight( 2, 1, 3, 0, -1 );
+
+ // Mark the Sign sources
+ nlr.addActivationSource( 1, 0, 2, 0 );
+ nlr.addActivationSource( 1, 1, 2, 1 );
+
+ // Variable indexing
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
+
+ // Very loose bounds for neurons except inputs
+ double large = 1000000;
+
+ tableau.setLowerBound( 2, -large );
+ tableau.setUpperBound( 2, large );
+ tableau.setLowerBound( 3, -large );
+ tableau.setUpperBound( 3, large );
+ tableau.setLowerBound( 4, -large );
+ tableau.setUpperBound( 4, large );
+ tableau.setLowerBound( 5, -large );
+ tableau.setUpperBound( 5, large );
+ tableau.setLowerBound( 6, -large );
+ tableau.setUpperBound( 6, large );
+
+ tableau.setLowerBound( 0, 4 );
+ tableau.setUpperBound( 0, 6 );
+ tableau.setLowerBound( 1, 1 );
+ tableau.setUpperBound( 1, 5 );
+
+ // Strong negative bias for x2, which is node (1,0)
+ nlr.setBias( 1, 0, -15 );
+
+ // Invoke SBT
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
+
+ /*
+ Input ranges:
+
+ x0: [4, 6]
+ x1: [1, 5]
+
+ Layer 1:
+
+ x2.lb = 2x0 + 3x1 - 15 : [-4, 12]
+ x2.ub = 2x0 + 3x1 - 15 : [-4, 12]
+
+ x3.lb = x0 + x1 : [5, 11]
+ x3.ub = x0 + x1 : [5, 11]
+
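+ (Added derivation of the relaxation used next: for an undecided Sign with
+ input range [lb, ub], the lower line ( 2/ub ) x - 1 joins ( 0, -1 ) to
+ ( ub, 1 ), and the upper line ( -2/lb ) x + 1 joins ( lb, -1 ) to ( 0, 1 ).)
+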
+ First sign is undecided, bounds are concretized. + Second sign is active, bounds become constant 1 + Coefficient (first Sign, lower): 2/12 = 1/6. + Coefficient (first Sign, upper): -2/-4 = 1/2. + + x4 range: [-1, 1] + x4.lb = 1/6 ( 2x0 + 3x1 - 15 ) - 1 = 2/6 x0 + 3/6 x1 - 21/6 + x4.ub = 1/2 ( 2x0 + 3x1 - 15 ) + 1 = x0 + 1.5x1 - 6.5 + + x5 range: [1, 1] + x5.lb = 1 + x5.ub = 1 + + Layer 2: + + x6.lb = 1 ( 2/6 x0 + 3/6 x1 - 21/6 ) - 1 ( 1 ) = 2/6 x0 + 3/6 x1 - 27/6 : [-16/6, 0] + x6.ub = 1 ( x0 + 1.5x1 - 6.5 ) - 1 ( 1 ) = x0 + 1.5x1 - 7.5 : [-2, 6] + + x6 range: [-8/3, 6] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), + Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2.6667, Tightening::LB ), + Tightening( 6, 6, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_signs_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
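+ // (Added note: eliminating the source variable at value -3 below fixes
+ // the first Sign neuron in its negative phase, so its output is the
+ // constant -1 regardless of the symbolic bounds.)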
+ nlr.setBias( 1, 0, -15 ); + + // However, the weighted sum variable has been eliminated + nlr.eliminateVariable( 2, -3 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2 is eliminated, everything set to -3 + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First sign is negative, bounds become constant -1 + Second sign is positive, bounds become constant 1 + + x4: all set to -1 + + x5: all set to 1 + + Layer 2: + + x6.lb = 1 ( -1 ) - 1 ( 1 ) = -2 + x6.ub = 1 ( -1 ) - 1 ( 1 ) = -2 + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, -1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), + Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), + Tightening( 6, -2, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_leaky_relu() + { + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTLeakyReLU( nlr, tableau ); // alpha = 0.2 + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [-1, 1] + x1: [-1, 1] + + Layer 1: + + x2.lb = x0 + x1 : [-2, 2] + x2.ub = x0 + x1 : [-2, 2] + + x3.lb = x0 - x1 : [-2, 2] + x3.ub = x0 - x1 : [-2, 2] + + Both LeakyReLUs are undecided, bounds are concretized. + Coefficient: ( 2 - 0.2*-2 )/( 2--2 ) = 2.4/4 = 0.6 + Bias: ( 0.2 - 1 ) * 2 * -2 / ( 2--2 ) = 0.8 + + x4.lb = x0 + x1 + x4.ub = 0.6 ( x0 + x1 ) + 0.8 = 0.6x0 + 0.6x1 + 0.8 + x4 range: [-0.4, 2] + + x5.lb = x0 - x1 + x5.ub = 0.6 ( x0 - x1 ) + 0.8 = 0.6x0 - 0.6x1 + 0.8 + x5 range: [-0.4, 2] + + Layer 2: + + x6.lb = 1 ( x0 + x1 ) + 1 ( x0 - x1 ) = 2x0 : [-2, 2] + x6.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) + 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 1.2x0 + 1.6 : [0.4, 2.8] + x6 range: [-2, 2.8] + + x7.lb = 1 ( x0 + x1 ) - 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2] + x7.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) - 1 ( x0 - x1 ) = -0.4x0 + 1.6x1 + 0.8 : [-1.2, 2.8] + x7 range: [-2.8, 2.8] + + Both LeakyReLUs are undecided, bounds are concretized. 
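+ (Added derivation of the coefficients below: for a LeakyReLU with slope
+ alpha = 0.2 and an undecided input range [lb, ub], the upper line is the
+ secant through ( lb, alpha * lb ) and ( ub, ub ), with coefficient
+ ( ub - alpha * lb ) / ( ub - lb ) and bias ( alpha - 1 ) * ub * lb / ( ub - lb );
+ the lower bound keeps the identity line.)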
+ Coefficient (first LeakyReLU): ( 2.8 - 0.2*-2 )/( 2.8--2 ) = 3.2/4.8 = 10/15
+ Bias (first LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2 / ( 2.8--2 ) = 14/15
+
+ Coefficient (second LeakyReLU): ( 2.8 - 0.2*-2.8 )/( 2.8--2.8 ) = 3.36/5.6 = 0.6
+ Bias (second LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2.8 / ( 2.8--2.8 ) = 1.12
+
+ x8.lb = 2x0
+ x8.ub = 10/15 ( 1.2x0 + 1.6 ) + 14/15 = 0.8x0 + 2
+ x8 range: [-0.4, 2.8]
+
+ x9.lb = 0.4x0 + 1.6x1 - 0.8
+ x9.ub = 0.6 ( -0.4x0 + 1.6x1 + 0.8 ) + 1.12 = -0.24 x0 + 0.96 x1 + 1.6
+ x9 range: [-0.56, 2.8]
+
+ Layer 3:
+
+ x10.lb = 1 ( 0.4x0 + 1.6x1 - 0.8 ) + 1 ( 2x0 ) + 1 = 2.4x0 + 1.6x1 + 0.2 : [-3.8, 4.2]
+ x10.ub = 1 ( -0.24 x0 + 0.96 x1 + 1.6 ) + 1 ( 0.8x0 + 2 ) + 1 = 0.56x0 + 0.96x1 + 4.6 : [3.08, 6.12]
+ x10 range: [-3.8, 6.12]
+
+ x11.lb = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2]
+ x11.ub = 0.6 ( -0.4x0 + 1.6x1 + 0.8 ) + 1.12 = -0.24 x0 + 0.96 x1 + 1.6 : [0.4, 2.8]
+ x11 range: [-2.8, 2.8]
+
+ */
+
+ List<Tightening> expectedBounds(
+ { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+ Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+
+ Tightening( 4, -0.4, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
+ Tightening( 5, -0.4, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+
+ Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2.8, Tightening::UB ),
+ Tightening( 7, -2.8, Tightening::LB ), Tightening( 7, 2.8, Tightening::UB ),
+
+ Tightening( 8, -0.4, Tightening::LB ), Tightening( 8, 2.8, Tightening::UB ),
+ Tightening( 9, -0.56, Tightening::LB ), Tightening( 9, 2.8, Tightening::UB ),
+
+ Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 6.12, Tightening::UB ),
+ Tightening( 11, -2.8, Tightening::LB ), Tightening( 11, 2.8, Tightening::UB )
+
+ } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+ }
+
+ void test_sbt_sigmoids_and_round()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkSBTSigmoidsAndRound( nlr, tableau );
+
+ tableau.setLowerBound( 0, -1 );
+ tableau.setUpperBound( 0, 1 );
+ tableau.setLowerBound( 1, -1 );
+ tableau.setUpperBound( 1, 1 );
+
+ // Invoke SBT
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+
+ // Layer 1
+ TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 0 ), -2, 0.00001 ) );
+ TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 0 ), 2, 0.00001 ) );
+ TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 1 ), -2, 0.00001 ) );
+ TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 1 ), 2, 0.00001 ) );
+
+ // Layer 2
+ TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 0 ), 0.1192, 0.0001 ) );
+ TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 0 ), 0.8807, 0.0001 ) );
+ TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 1 ), 0.1192, 0.0001 ) );
+ TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 1 ), 0.8807, 0.0001 ) );
+
+ // Layer 3
+ /*
+ Double-check with Python
+ ---
+ from math import exp as e
+ def g(x):
+     return 1 / (1 + e(-x))
+
+ def g_prime(x):
+     return g(x) * (1 - g(x))
+
+ def lam(l, u):
+     return (g(u) - g(l)) / (u - l)
+
+ def lam_prime(l, u):
+     return min(g_prime(l), g_prime(u))
+
+ l3 = l4 = -2
+ u3 = u4 = 2
+ l5 = l6 = g(-2)
+ u5 = u6 = g(2)
+ lambda7 = lam(l3, u3)
+ lambda7_prime = lam_prime(l3, u3)
+ lambda8 = lam(l4, u4)
+ lambda8_prime = lam_prime(l4, u4)
+ x7_l = lambda7_prime * (-2) + g(-2) + g(-2) - lambda7_prime * (-2 + -2)
+ x7_u = lambda7_prime * (2) + g(2) + g(2) - lambda7_prime * (2 + 2)
+ x8_l = lambda8_prime * (-2) + g(-2) - g(2) - lambda8_prime * (-2 - 2)
+ x8_u = lambda8_prime * (2) + g(2) - g(-2) - lambda8_prime * (2 - -2)
+ print(x7_l)
+ print(x7_u)
+ print(x8_l)
+ print(x8_u)
+ ---
+ [output]:
+ 0.4483930148512481
+ 1.5516069851487517
+ -0.5516069851487517
+ 0.5516069851487517
+ */
+ TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 0 ), 0.4483, 0.0001 ) );
+ TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 0 ), 1.5516, 0.0001 ) );
+ TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 1 ), -0.5516, 0.0001 ) );
+ TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 1 ), 0.5516, 0.0001 ) );
+
+ // Layer 4
+ TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 0 ), 0 );
+ TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 0 ), 2 );
+ TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 1 ), -1 );
+ TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 1 ), 1 );
+ }
+
+ void test_sbt_max_not_fixed()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkSBTMax( nlr, tableau );
+
+ tableau.setLowerBound( 0, -1 );
+ tableau.setUpperBound( 0, 1 );
+ tableau.setLowerBound( 1, -1 );
+ tableau.setUpperBound( 1, 2 );
+
+ // Invoke SBT
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
+
+ /*
+ Input ranges:
+
+ x0: [-1, 1]
+ x1: [-1, 2]
+
+ Layer 1:
+
+ x2.lb = x0 + x1 : [-2, 3]
+ x2.ub = x0 + x1 : [-2, 3]
+
+ x3.lb = x0 - x1 : [-3, 2]
+ x3.ub = x0 - x1 : [-3, 2]
+
+ Both ReLUs are undecided, bounds are concretized.
+ Coefficient (first ReLU): 3/( 3--2 ) = 3/5 = 0.6
+ Coefficient (second ReLU): 2/( 2--3 ) = 2/5 = 0.4
+
+ x4.lb = 0.6 ( x0 + x1 ) = 0.6x0 + 0.6x1
+ x4.ub = 0.6 ( x0 + x1 ) + 1.2 = 0.6x0 + 0.6x1 + 1.2
+ x4 range: [0, 3]
+
+ x5.lb = 0.4 ( x0 - x1 ) = 0.4x0 - 0.4x1
+ x5.ub = 0.4 ( x0 - x1 ) + 1.2 = 0.4x0 - 0.4x1 + 1.2
+ x5 range: [0, 2]
+
+ Max is not fixed because x5.lb <= x4.ub and x4.lb <= x5.ub
+ Max inherits lower bound from x4, and its upper bound is constant 3.
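+ (Added explanation: when the Max is not fixed, its upper bound is
+ concretized to the largest concrete upper bound among its sources,
+ max( 3, 2 ) = 3, while the symbolic lower bound of a source attaining
+ the largest concrete lower bound, here x4, can be kept.)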
+
+ x6.lb = 0.6x0 + 0.6x1 : [-1.2, 1.8]
+ x6.ub = 3 : [3, 3]
+ x6 range: [0, 3]
+
+ Layer 3:
+
+ x7.lb = 2 ( 0.6x0 + 0.6x1 ) = 1.2x0 + 1.2x1 : [-2.4, 3.6]
+ x7.ub = 2 ( 3 ) = 6 : [6, 6]
+ x7 range: [-2.4, 6]
+ */
+
+ List<Tightening> expectedBounds( {
+ Tightening( 2, -2, Tightening::LB ),
+ Tightening( 2, 3, Tightening::UB ),
+ Tightening( 3, -3, Tightening::LB ),
+ Tightening( 3, 2, Tightening::UB ),
+ Tightening( 4, 0, Tightening::LB ),
+ Tightening( 4, 3, Tightening::UB ),
+ Tightening( 5, 0, Tightening::LB ),
+ Tightening( 5, 2, Tightening::UB ),
+ Tightening( 6, 0, Tightening::LB ),
+ Tightening( 6, 3, Tightening::UB ),
+ Tightening( 7, -2.4, Tightening::LB ),
+ Tightening( 7, 6, Tightening::UB ),
+
+ } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+ }
+
+ void test_sbt_max_fixed()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkSBTMax( nlr, tableau );
+
+ tableau.setLowerBound( 0, 1 );
+ tableau.setUpperBound( 0, 2 );
+ tableau.setLowerBound( 1, -3 );
+ tableau.setUpperBound( 1, -2 );
+
+ // Invoke SBT
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
+
+ /*
+ Input ranges:
+
+ x0: [1, 2]
+ x1: [-3, -2]
+
+ Layer 1:
+
+ x2.lb = x0 + x1 : [-2, 0]
+ x2.ub = x0 + x1 : [-2, 0]
+
+ x3.lb = x0 - x1 : [3, 5]
+ x3.ub = x0 - x1 : [3, 5]
+
+ First ReLU is negative, bounds become constant 0
+ Second ReLU is positive, bounds survive the activation
+
+ x4: all set to 0
+
+ x5.lb = x0 - x1 : [3, 5]
+ x5.ub = x0 - x1 : [3, 5]
+
+ Max is fixed because x5.lb > x4.ub; it inherits x5's bounds
+
+ x6.lb = x0 - x1 : [3, 5]
+ x6.ub = x0 - x1 : [3, 5]
+
+ Layer 3:
+
+ x7.lb = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10]
+ x7.ub = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10]
+ */
+
+ List<Tightening> expectedBounds( {
+ Tightening( 2, -2, Tightening::LB ),
+ Tightening( 2, 0, Tightening::UB ),
+ Tightening( 3, 3, Tightening::LB ),
+ Tightening( 3, 5, Tightening::UB ),
+ Tightening( 4, 0, Tightening::LB ),
+ Tightening( 4, 0, Tightening::UB ),
+ Tightening( 5, 3, Tightening::LB ),
+ Tightening( 5, 5, Tightening::UB ),
+ Tightening( 6, 3, Tightening::LB ),
+ Tightening( 6, 5, Tightening::UB ),
+ Tightening( 7, 6, Tightening::LB ),
+ Tightening( 7, 10, Tightening::UB ),
+
+ } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+ }
+
+ void test_sbt_softmax1()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkSBTSoftmax( nlr, tableau );
+
+ tableau.setLowerBound( 0, -1 );
+ tableau.setUpperBound( 0, 1 );
+ tableau.setLowerBound( 1, -1 );
+ tableau.setUpperBound( 1, 1 );
+ tableau.setLowerBound( 2, -1 );
+ tableau.setUpperBound( 2, 1 );
+
+ // Invoke SBT
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
+ }
+
+ void test_sbt_softmax2()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ {
+ Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" );
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkSBTSoftmax( nlr, tableau );
+
+ tableau.setLowerBound( 0, 1 );
+
tableau.setUpperBound( 0, 1.000001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.000001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.000001 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] + */ + List expectedBounds( { Tightening( 3, 2, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 0, Tightening::UB ), + Tightening( 6, 0.2595, Tightening::LB ), + Tightening( 6, 0.2595, Tightening::UB ), + Tightening( 7, 0.7054, Tightening::LB ), + Tightening( 7, 0.7054, Tightening::UB ), + Tightening( 8, 0.0351, Tightening::LB ), + Tightening( 8, 0.0351, Tightening::UB ), + Tightening( 9, 1, Tightening::LB ), + Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), + Tightening( 10, -1, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + { + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "er" ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.000001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.000001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.000001 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] + */ + List expectedBounds( { Tightening( 3, 2, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 0, Tightening::UB ), + Tightening( 6, 0.2595, Tightening::LB ), + Tightening( 6, 0.2595, Tightening::UB ), + Tightening( 7, 0.7054, Tightening::LB ), + Tightening( 7, 0.7054, Tightening::UB ), + Tightening( 8, 0.0351, Tightening::LB ), + Tightening( 8, 0.0351, Tightening::UB ), + Tightening( 9, 1, Tightening::LB ), + Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), + Tightening( 10, -1, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + } + + void test_sbt_softmax3() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax2( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.00001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.00001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.00001 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] + */ + + List expectedBounds( + { Tightening( 3, 2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + 
Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, -1, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, -2, Tightening::UB ), + Tightening( 8, 0.8668, Tightening::LB ), Tightening( 8, 0.8668, Tightening::UB ), + Tightening( 9, 0.9820, Tightening::LB ), Tightening( 9, 0.9820, Tightening::UB ), + Tightening( 10, 0.1173, Tightening::LB ), Tightening( 10, 0.1173, Tightening::UB ), + Tightening( 11, 0.0179, Tightening::LB ), Tightening( 11, 0.0179, Tightening::UB ), + Tightening( 12, 0.0159, Tightening::LB ), Tightening( 12, 0.0159, Tightening::UB ), + Tightening( 13, 0.9470, Tightening::LB ), Tightening( 13, 0.9470, Tightening::UB ), + Tightening( 14, -0.9470, Tightening::LB ), Tightening( 14, -0.9470, Tightening::UB ), + Tightening( 15, 1.0253, Tightening::LB ), Tightening( 15, 1.0253, Tightening::UB ), + Tightening( 16, -1.0253, Tightening::LB ), Tightening( 16, -1.0253, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_softmax_bounds_er() + { + Vector inputLb = { -1, 0, 1 }; + Vector inputUb = { 0, 2, 4 }; + Vector input = { -0.5, 1, 2.5 }; + + double value = NLR::DeepPolySoftmaxElement::ERLowerBound( input, inputLb, inputUb, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 0.0114799, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dERLowerBound( input, inputLb, inputUb, 0, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 0.00563867, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dERLowerBound( input, inputLb, inputUb, 0, 1 ); + TS_ASSERT( FloatUtils::areEqual( value, -0.000838421, 0.00001 ) ); + + + Vector outputLb = { 0.2, 0, 0 }; + Vector outputUb = { 0.4, 0.1, 0.1 }; + + value = NLR::DeepPolySoftmaxElement::ERUpperBound( input, outputLb, outputUb, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, -1.44538, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dERUpperBound( input, outputLb, outputUb, 0, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 1.96538, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dERUpperBound( input, outputLb, outputUb, 0, 1 ); + TS_ASSERT( FloatUtils::areEqual( value, -0.358535, 0.00001 ) ); + } + + void test_softmax_bounds_lse1() + { + Vector inputLb = { -1, 0, 1 }; + Vector inputUb = { 0, 2, 3 }; + Vector input = { -0.5, 1, 2 }; + double value = NLR::DeepPolySoftmaxElement::LSELowerBound( input, inputLb, inputUb, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) ); + value = NLR::DeepPolySoftmaxElement::dLSELowerBound( input, inputLb, inputUb, 0, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) ); + value = NLR::DeepPolySoftmaxElement::dLSELowerBound( input, inputLb, inputUb, 0, 1 ); + TS_ASSERT( FloatUtils::areEqual( value, -0.00703444, 0.001 ) ); + + Vector outputLb = { 0.2, 0, 0 }; + Vector outputUb = { 0.4, 0.1, 0.1 }; + value = NLR::DeepPolySoftmaxElement::LSEUpperBound( input, outputLb, outputUb, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, -0.164165, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dLSEUpperbound( input, outputLb, outputUb, 0, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 0.272204, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dLSEUpperbound( input, outputLb, outputUb, 0, 1 ); + TS_ASSERT( FloatUtils::areEqual( value, -0.073207, 0.00001 ) ); + } + + void test_sbt_bilinear() + { + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); 
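+ // (Illustrative sketch, not part of the original suite: the bilinear
+ // relaxation exercised below is a McCormick-style envelope. For
+ // x2 in [-1, 6] and x3 in [-1, 3], the plane
+ // x3.lb * x2 + x2.lb * x3 - x2.lb * x3.lb under-approximates the product
+ // x2 * x3 at every point of the box, e.g. at ( x2, x3 ) = ( 2, 1 ).)
+ {
+ double x2 = 2, x3 = 1;
+ double lowerPlane = ( -1 * x2 ) + ( -1 * x3 ) - ( -1 * -1 );
+ TS_ASSERT( lowerPlane <= x2 * x3 );
+ }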
+ populateNetworkSBTBilinear( nlr, tableau );
+
+ tableau.setLowerBound( 0, 1 );
+ tableau.setUpperBound( 0, 2 );
+ tableau.setLowerBound( 1, -2 );
+ tableau.setUpperBound( 1, 1 );
+
+ // Invoke SBT
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
+
+ /*
+ Input ranges:
+
+ x0: [1, 2]
+ x1: [-2, 1]
+
+ Layer 1:
+
+ x2.lb = x0 - 2x1 : [-1, 6]
+ x2.ub = x0 - 2x1 : [-1, 6]
+
+ x3.lb = x0 + x1 : [-1, 3]
+ x3.ub = x0 + x1 : [-1, 3]
+
+ Coefficients for bilinear layer:
+ Lower bound:
+ alpha_l = x3.lb = -1
+ beta = x2.lb = -1
+ gamma_l = -x2.lb * x3.lb = --1 * -1 = -1
+
+ Upper bound:
+ alpha_u = x3.ub = 3
+ beta = x2.lb = -1
+ gamma_u = -x2.lb * x3.ub = --1 * 3 = 3
+
+ x4.lb = -1 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + -1 = -2x0 + x1 - 1 : [-7, -2]
+ x4.ub = 3 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + 3 = 2x0 - 7x1 + 3 : [-2, 21]
+ x4 range: [-7, 21]
+
+ Layer 3:
+
+ x5.lb = -1 ( 2x0 - 7x1 + 3 ) = -2x0 + 7x1 - 3 : [-21, 2]
+ x5.ub = -1 ( -2x0 + x1 - 1 ) = 2x0 - x1 + 1 : [2, 7]
+ x5 range: [-21, 7]
+ */
+
+ List<Tightening> expectedBounds( { Tightening( 2, -1, Tightening::LB ),
+ Tightening( 2, 6, Tightening::UB ),
+ Tightening( 3, -1, Tightening::LB ),
+ Tightening( 3, 3, Tightening::UB ),
+ Tightening( 4, -7, Tightening::LB ),
+ Tightening( 4, 21, Tightening::UB ),
+ Tightening( 5, -21, Tightening::LB ),
+ Tightening( 5, 7, Tightening::UB ) } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+ }
+
+ void test_concretize_input_assignment()
+ {
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+
+ populateNetwork( nlr );
+
+ // With ReLUs, when the inputs are zeros, only the biases count
+ tableau.nextValues[0] = 0;
+ tableau.nextValues[1] = 0;
+
+ Map<unsigned, double> assignment;
+
+ TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) );
+
+ TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) );
+ TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 4 ) );
+
+ TS_ASSERT( assignment.size() == 14 );
+ TS_ASSERT( FloatUtils::areEqual( assignment[12], 1 ) );
+ TS_ASSERT( FloatUtils::areEqual( assignment[13], 4 ) );
+
+ // With ReLUs, case 1
+ tableau.nextValues[0] = 1;
+ tableau.nextValues[1] = 1;
+
+ TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) );
+
+ TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) );
+ TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 1 ) );
+
+ TS_ASSERT( FloatUtils::areEqual( assignment[12], 1 ) );
+ TS_ASSERT( FloatUtils::areEqual( assignment[13], 1 ) );
+
+ // With ReLUs, case 2
+ tableau.nextValues[0] = 1;
+ tableau.nextValues[1] = 2;
+
+ TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) );
+
+ TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 0 ) );
+ TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 0 ) );
+
+ TS_ASSERT( FloatUtils::areEqual( assignment[12], 0 ) );
+ TS_ASSERT( FloatUtils::areEqual( assignment[13], 0 ) );
+ }
+
+
+ void test_obtain_bound_from_ipq()
+ {
+ NLR::NetworkLevelReasoner nlr;
+ populateNetwork( nlr );
+
+ Query query;
+ query.setNumberOfVariables( 14 );
+
+
+ // Initialize the bounds
+ query.setLowerBound( 0, -1 );
+ query.setUpperBound( 0, 1 );
+ query.setLowerBound( 1, -1 );
+ query.setUpperBound( 1, 1 );
+
+ double large = 1000;
+ query.setLowerBound( 2, -large );
+ query.setUpperBound( 2, large );
+
query.setLowerBound( 3, -large ); + query.setUpperBound( 3, large ); + query.setLowerBound( 4, -large ); + query.setUpperBound( 4, large ); + query.setLowerBound( 5, -large ); + query.setUpperBound( 5, large ); + query.setLowerBound( 6, -large ); + query.setUpperBound( 6, large ); + query.setLowerBound( 7, -large ); + query.setUpperBound( 7, large ); + query.setLowerBound( 8, -large ); + query.setUpperBound( 8, large ); + query.setLowerBound( 9, -large ); + query.setUpperBound( 9, large ); + query.setLowerBound( 10, -large ); + query.setUpperBound( 10, large ); + query.setLowerBound( 11, -large ); + query.setUpperBound( 11, large ); + query.setLowerBound( 12, -large ); + query.setUpperBound( 12, large ); + query.setLowerBound( 13, -large ); + query.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds( query ) ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : expectedBounds ) + TS_ASSERT( bounds.exists( bound ) ); + } + + void test_backwards_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ), + Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, 
Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 8, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ), + + Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 8, 0, Tightening::LB ), + Tightening( 9, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_relu2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); 
+ tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ), + + Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ), + Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ), + + Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ), + + Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ), + + Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + + Tightening( 19, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + 
tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ), + Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ), + Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ), + + Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ), + Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ), + Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ), + + Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + + Tightening( 20, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_sigmoid() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSigmoid( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( 
nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0.2689, Tightening::LB ), Tightening( 4, 0.7311, Tightening::UB ), + Tightening( 5, 0.5, Tightening::LB ), Tightening( 5, 0.8808, Tightening::UB ), + + Tightening( 6, -0.1261, Tightening::LB ), Tightening( 6, 0.5069, Tightening::UB ), + Tightening( 7, -0.2379, Tightening::LB ), Tightening( 7, 0.8571, Tightening::UB ), + + Tightening( 8, 0.4685, Tightening::LB ), Tightening( 8, 0.6241, Tightening::UB ), + Tightening( 9, 0.4408, Tightening::LB ), Tightening( 9, 0.7021, Tightening::UB ), + + Tightening( 10, -1.1819, Tightening::LB ), Tightening( 10, -1.0535, Tightening::UB ), + Tightening( 11, 3.4986, Tightening::LB ), Tightening( 11, 3.8797, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0.0066, Tightening::LB ), Tightening( 4, 0.8807, Tightening::UB ), + Tightening( 5, 0.0179, Tightening::LB ), Tightening( 5, 0.9526, Tightening::UB ), + + Tightening( 6, -0.8362, Tightening::LB ), Tightening( 6, 0.9193, Tightening::UB ), + Tightening( 7, -0.8860, Tightening::LB ), Tightening( 7, 1.6904, Tightening::UB ), + + Tightening( 8, 0.3023, Tightening::LB ), Tightening( 8, 0.7148, Tightening::UB ), + Tightening( 9, 0.2919, Tightening::LB ), Tightening( 9, 0.8443, Tightening::UB ), + + Tightening( 10, -1.2694, Tightening::LB ), Tightening( 10, -0.8841, Tightening::UB ), + Tightening( 11, 3.2396, Tightening::LB ), Tightening( 11, 4.05, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, 
expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_sigmoid2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSigmoid2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), + Tightening( 8, 0.0474, Tightening::LB ), Tightening( 8, 0.9526, Tightening::UB ), + Tightening( 9, 0.0474, Tightening::LB ), Tightening( 9, 0.9526, Tightening::UB ), + Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9975, Tightening::UB ), + + Tightening( 11, 1.3787, Tightening::LB ), Tightening( 11, 4.6213, Tightening::UB ), + Tightening( 12, -0.5636, Tightening::LB ), Tightening( 12, 2.5636, Tightening::UB ), + Tightening( 13, -2.3771, Tightening::LB ), Tightening( 13, 0.3771, Tightening::UB ), + + Tightening( 14, 0.7988, Tightening::LB ), Tightening( 14, 0.9903, Tightening::UB ), + Tightening( 15, 0.3627, Tightening::LB ), Tightening( 15, 0.9285, Tightening::UB ), + Tightening( 16, 0.0849, Tightening::LB ), Tightening( 16, 0.5932, Tightening::UB ), + + Tightening( 17, -1.2113, Tightening::LB ), Tightening( 17, 0.0354, Tightening::UB ), + Tightening( 18, 1.4027, Tightening::LB ), Tightening( 18, 2.4177, Tightening::UB ), + + Tightening( 19, 0.2295, Tightening::LB ), Tightening( 19, 0.5088, Tightening::UB ), + Tightening( 20, 0.8026, Tightening::LB ), Tightening( 20, 0.9182, Tightening::UB ), + + Tightening( 21, -1.6539, Tightening::LB ), Tightening( 21, -1.3393, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, 
-large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.9933, Tightening::UB ), + Tightening( 8, 0.0067, Tightening::LB ), Tightening( 8, 0.9933, Tightening::UB ), + Tightening( 9, 0.0025, Tightening::LB ), Tightening( 9, 0.9933, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9991, Tightening::UB ), + + Tightening( 11, 1.2517, Tightening::LB ), Tightening( 11, 4.9701, Tightening::UB ), + Tightening( 12, -0.9599, Tightening::LB ), Tightening( 12, 2.8466, Tightening::UB ), + Tightening( 13, -2.8147, Tightening::LB ), Tightening( 13, 0.9188, Tightening::UB ), + + Tightening( 14, 0.7776, Tightening::LB ), Tightening( 14, 0.9931, Tightening::UB ), + Tightening( 15, 0.2769, Tightening::LB ), Tightening( 15, 0.9451, Tightening::UB ), + Tightening( 16, 0.0565, Tightening::LB ), Tightening( 16, 0.7148, Tightening::UB ), + + Tightening( 17, -1.4307, Tightening::LB ), Tightening( 17, 0.1083, Tightening::UB ), + Tightening( 18, 1.2592, Tightening::LB ), Tightening( 18, 2.5519, Tightening::UB ), + + Tightening( 19, 0.1929, Tightening::LB ), Tightening( 19, 0.5270, Tightening::UB ), + Tightening( 20, 0.7789, Tightening::LB ), Tightening( 20, 0.9277, Tightening::UB ), + + Tightening( 21, -1.6967, Tightening::LB ), Tightening( 21, -1.3115, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List 
expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_abs() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardAbs( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, -4, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 2, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 4, Tightening::UB ), + + Tightening( 6, -5, Tightening::LB ), Tightening( 
6, 4, Tightening::UB ), + Tightening( 7, -4, Tightening::LB ), Tightening( 7, 10, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 10, Tightening::UB ), + + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 2, Tightening::LB ), Tightening( 11, 27, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_abs2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardAbs2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 9, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), Tightening( 14, 9, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), + Tightening( 16, 0, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), + + Tightening( 17, -15, Tightening::LB ), Tightening( 17, 12, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 27, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 15, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), + + Tightening( 21, -28, Tightening::LB ), Tightening( 21, 14, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( 
nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 15, Tightening::UB ), + + Tightening( 11, -13, Tightening::LB ), Tightening( 11, 18, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 25, Tightening::UB ), + Tightening( 13, -7, Tightening::LB ), Tightening( 13, 15, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), Tightening( 14, 18, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), Tightening( 15, 25, Tightening::UB ), + Tightening( 16, 0, Tightening::LB ), Tightening( 16, 15, Tightening::UB ), + + Tightening( 17, -33, Tightening::LB ), Tightening( 17, 25, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 58, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 58, Tightening::UB ), + + Tightening( 21, -59, Tightening::LB ), Tightening( 21, 32, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( 
bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_round() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardRound( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -4, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 6.5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 11, -4, Tightening::LB ), + Tightening( 11, 6, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( 
{ + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -3, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, -10.5, Tightening::LB ), Tightening( 7, 5.5, Tightening::UB ), + + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -10, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + + Tightening( 10, -3, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + Tightening( 11, -15.5, Tightening::LB ), Tightening( 11, 11.5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, -10, Tightening::LB ), + Tightening( 7, 5, Tightening::UB ), + + Tightening( 11, -14, Tightening::LB ), + Tightening( 11, 11, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_round2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardRound2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 15, Tightening::UB ), + Tightening( 12, -10, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -7, Tightening::LB ), Tightening( 13, 3, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 15, Tightening::UB ), + Tightening( 15, -10, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -7, Tightening::LB ), Tightening( 16, 3, Tightening::UB ), + + Tightening( 17, -27.5, Tightening::LB ), Tightening( 17, 27.5, Tightening::UB ), + Tightening( 18, -16.5, Tightening::LB ), Tightening( 18, 16.5, Tightening::UB ), + + Tightening( 19, -28, Tightening::LB ), Tightening( 19, 28, Tightening::UB ), + Tightening( 20, -17, Tightening::LB ), 
Tightening( 20, 17, Tightening::UB ), + + Tightening( 21, -38, Tightening::LB ), Tightening( 21, 36, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -13, Tightening::LB ), Tightening( 11, 30, Tightening::UB ), + Tightening( 12, -23, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), + + Tightening( 14, -13, Tightening::LB ), Tightening( 14, 30, Tightening::UB ), + Tightening( 15, -23, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), + Tightening( 16, -9, 
Tightening::LB ), Tightening( 16, 6, Tightening::UB ), + + Tightening( 17, -57.5, Tightening::LB ), Tightening( 17, 32.5, Tightening::UB ), + Tightening( 18, -23.5, Tightening::LB ), Tightening( 18, 26.5, Tightening::UB ), + + Tightening( 19, -58, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), + Tightening( 20, -24, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), + + Tightening( 21, -74, Tightening::LB ), Tightening( 21, 44, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_sign() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, 
large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_sign2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + 
Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB 
), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_leaky_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 4, -0.1, Tightening::LB ), + + Tightening( 8, -0.045, Tightening::LB ), + Tightening( 9, -0.3, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + 
tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), + Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + + Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), + Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + + Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), + Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 4, -0.5, Tightening::LB ), + Tightening( 5, -0.4, Tightening::LB ), + + Tightening( 8, -0.4571, Tightening::LB ), + Tightening( 9, -1.1057, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_leaky_relu2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyRelu2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 
3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ), + Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ), + Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ), + Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ), + Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ), + + Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ), + Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ), + + Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ), + Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ), + + Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 7, -0.2, Tightening::LB ), + Tightening( 8, -0.3, Tightening::LB ), + Tightening( 9, -0.3, Tightening::LB ), + Tightening( 10, -0.6, Tightening::LB ), + + Tightening( 14, -0.9, Tightening::LB ), + Tightening( 15, -0.89, Tightening::LB ), + Tightening( 16, -0.77, Tightening::LB ), + + Tightening( 19, -2.3133, Tightening::LB ), + Tightening( 20, -1.2, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + 
tableau.setUpperBound( 19, large );
+        tableau.setLowerBound( 20, -large );
+        tableau.setUpperBound( 20, large );
+        tableau.setLowerBound( 21, -large );
+        tableau.setUpperBound( 21, large );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds3( {
+            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
+            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
+            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
+            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
+
+            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
+            Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
+            Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
+            Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
+
+            Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ),
+            Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ),
+            Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ),
+
+            Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ),
+            Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ),
+            Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ),
+
+            Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ),
+            Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ),
+
+            Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ),
+            Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ),
+
+            Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
+
+
+        // Invoke backward LP propagation
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds4( {
+            Tightening( 7, -0.2, Tightening::LB ),
+            Tightening( 8, -0.5, Tightening::LB ),
+            Tightening( 9, -0.6, Tightening::LB ),
+            Tightening( 10, -1.5, Tightening::LB ),
+
+            Tightening( 14, -1.1, Tightening::LB ),
+            Tightening( 15, -2.1771, Tightening::LB ),
+            Tightening( 16, -1.15, Tightening::LB ),
+
+            Tightening( 19, -5.6259, Tightening::LB ),
+            Tightening( 20, -1.9, Tightening::LB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+    }
+
+    void test_backwards_softmax_and_max()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+                                   "backward-converge" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkBackwardSoftmaxAndMax( nlr, tableau );
+
+        tableau.setLowerBound( 0, 0 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, 0 );
+        tableau.setUpperBound( 1, 1 );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2,
Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 0.2119, Tightening::LB ), Tightening( 3, 0.8756, Tightening::UB ), + Tightening( 5, 0.0049, Tightening::LB ), Tightening( 5, 0.6652, Tightening::UB ), + Tightening( 7, 0.0634, Tightening::LB ), Tightening( 7, 0.4955, Tightening::UB ), + + Tightening( 8, -0.1519, Tightening::LB ), Tightening( 8, 1.4775, Tightening::UB ), + Tightening( 9, 0.6614, Tightening::LB ), Tightening( 9, 2.3899, Tightening::UB ), + + Tightening( 10, 0.6614, Tightening::LB ), Tightening( 10, 2.3899, Tightening::UB ), + + Tightening( 11, -2.3899, Tightening::LB ), Tightening( 11, -0.6614, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + + Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 9, 0.0674, Tightening::LB ), Tightening( 9, 2.9934, Tightening::UB ), + + Tightening( 10, 0.0674, Tightening::LB ), Tightening( 10, 2.9934, Tightening::UB ), + + Tightening( 11, -2.9934, Tightening::LB ), Tightening( 11, -0.0674, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + 
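+        // As with the first query, the backward LP pass is expected to yield no
+        // additional tightenings here (expectedBounds4 below is empty).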
TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_softmax_and_max2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSoftmaxAndMax2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0.0003, Tightening::LB ), Tightening( 7, 0.9864, Tightening::UB ), + Tightening( 8, 0.0001, Tightening::LB ), Tightening( 8, 0.9907, Tightening::UB ), + Tightening( 9, 0.0001, Tightening::LB ), Tightening( 9, 0.9907, Tightening::UB ), + Tightening( 10, 0.0001, Tightening::LB ), Tightening( 10, 0.9994, Tightening::UB ), + + Tightening( 11, 1.0013, Tightening::LB ), Tightening( 11, 4.9635, Tightening::UB ), + Tightening( 12, -0.9861, Tightening::LB ), Tightening( 12, 2.9806, Tightening::UB ), + Tightening( 13, -2.9902, Tightening::LB ), Tightening( 13, 0.9678, Tightening::UB ), + + Tightening( 14, 0.1086, Tightening::LB ), Tightening( 14, 0.9971, Tightening::UB ), + Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8766, Tightening::UB ), + Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4595, Tightening::UB ), + + Tightening( 17, 0.1086, Tightening::LB ), Tightening( 17, 0.9971, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 
10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0.0001, Tightening::LB ), Tightening( 7, 0.9999, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 0.9991, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 0.9990, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9999, Tightening::UB ), + + Tightening( 11, 1.0003, Tightening::LB ), Tightening( 11, 4.9989, Tightening::UB ), + Tightening( 12, -0.9999, Tightening::LB ), Tightening( 12, 2.9979, Tightening::UB ), + Tightening( 13, -2.9989, Tightening::LB ), Tightening( 13, 0.9980, Tightening::UB ), + + Tightening( 14, 0.1067, Tightening::LB ), Tightening( 14, 0.9972, Tightening::UB ), + Tightening( 15, 0.0024, Tightening::LB ), Tightening( 15, 0.8786, Tightening::UB ), + Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4677, Tightening::UB ), + + Tightening( 17, 0.1067, Tightening::LB ), Tightening( 17, 0.9972, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_relu_and_bilinear() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReluAndBilinear( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB 
+    void test_backwards_relu_and_bilinear()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+                                   "backward-converge" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkBackwardReluAndBilinear( nlr, tableau );
+
+        tableau.setLowerBound( 0, 0 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, 0 );
+        tableau.setUpperBound( 1, 1 );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
+            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
+
+            Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
+
+            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ),
+            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ),
+
+            Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ),
+
+            Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+
+        // Invoke backward LP propagation
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds2( {} );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+
+        // Change the current bounds
+        tableau.setLowerBound( 0, -3 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 2 );
+
+        double large = 1000000;
+        tableau.setLowerBound( 2, -large );
+        tableau.setUpperBound( 2, large );
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+        tableau.setLowerBound( 7, -large );
+        tableau.setUpperBound( 7, large );
+        tableau.setLowerBound( 8, -large );
+        tableau.setUpperBound( 8, large );
+        tableau.setLowerBound( 9, -large );
+        tableau.setUpperBound( 9, large );
+        tableau.setLowerBound( 10, -large );
+        tableau.setUpperBound( 10, large );
+        tableau.setLowerBound( 11, -large );
+        tableau.setUpperBound( 11, large );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds3( {
+            Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
+            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
+
+            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
+            Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
+
+            Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ),
+            Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ),
+
+            Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ),
+
+            Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
+
+
+        // Invoke backward LP propagation
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds4( {
+            Tightening( 7, 0, Tightening::LB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+    }
+
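+    /*
+      Note: an empty expected list (expectedBounds2 in the test above) asserts
+      that the backward LP pass finds no tightenings beyond those DeepPoly has
+      already committed to the tableau, since boundsEqual() compares list
+      sizes first.
+    */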
+    void test_backwards_relu_and_bilinear2()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+                                   "backward-converge" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkBackwardReluAndBilinear2( nlr, tableau );
+
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 1 );
+        tableau.setLowerBound( 2, -1 );
+        tableau.setUpperBound( 2, 1 );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds( {
+            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
+            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
+            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
+
+            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
+            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
+            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
+            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
+
+            Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ),
+            Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ),
+
+            Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ),
+            Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ),
+
+            Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+
+        // Invoke backward LP propagation
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds2( {
+            Tightening( 13, 0, Tightening::LB ),
+            Tightening( 14, 0, Tightening::LB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+
+        // Change the current bounds
+        tableau.setLowerBound( 0, -3 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 2 );
+        tableau.setLowerBound( 2, -2 );
+        tableau.setUpperBound( 2, 2 );
+
+        double large = 1000000;
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+        tableau.setLowerBound( 7, -large );
+        tableau.setUpperBound( 7, large );
+        tableau.setLowerBound( 8, -large );
+        tableau.setUpperBound( 8, large );
+        tableau.setLowerBound( 9, -large );
+        tableau.setUpperBound( 9, large );
+        tableau.setLowerBound( 10, -large );
+        tableau.setUpperBound( 10, large );
+        tableau.setLowerBound( 11, -large );
+        tableau.setUpperBound( 11, large );
+        tableau.setLowerBound( 12, -large );
+        tableau.setUpperBound( 12, large );
+        tableau.setLowerBound( 13, -large );
+        tableau.setUpperBound( 13, large );
+        tableau.setLowerBound( 14, -large );
+        tableau.setUpperBound( 14, large );
+        tableau.setLowerBound( 15, -large );
+        tableau.setUpperBound( 15, large );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds3( {
+            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
+            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
+            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
+            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
+
+            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
+            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
+            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
+            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
+
+            Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ),
+            Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ),
+
+            Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ),
+            Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ),
+
+            Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
+
+
+        // Invoke backward LP propagation
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds4( {
+            Tightening( 7, 0, Tightening::LB ),
+
+            Tightening( 13, 0, Tightening::LB ),
+            Tightening( 14, 0, Tightening::LB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+    }
+
+    void test_preimage_approximation_leaky_relu()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+                                   "backward-preimage-approx" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkBackwardLeakyReLU( nlr, tableau );
+
+        tableau.setLowerBound( 0, 0 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, 0 );
+        tableau.setUpperBound( 1, 1 );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
+            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+
+            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+
+            Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
+            Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
+
+            Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ),
+            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
+
+            Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ),
+            Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+
+        // Invoke PreimageApproximation
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        /*List<Tightening> expectedBounds2(
+            { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
+              Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+
+              Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
+              Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+
+              Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
+              Tightening( 7, -2.1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
+
+              Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ),
+              Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
+
+              Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.35, Tightening::UB ),
+              Tightening( 11, 1.35, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ),
+            } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        for ( auto &bound : bounds )
+        {
+            bound.dump();
+        }
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );*/
+
+
+        // Change the current bounds
+        tableau.setLowerBound( 0, -3 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 2 );
+
+        double large = 1000000;
+        tableau.setLowerBound( 2, -large );
+        tableau.setUpperBound( 2, large );
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+        tableau.setLowerBound( 7, -large );
+        tableau.setUpperBound( 7, large );
+        tableau.setLowerBound( 8, -large );
+        tableau.setUpperBound( 8, large );
+        tableau.setLowerBound( 9, -large );
+        tableau.setUpperBound( 9, large );
+        tableau.setLowerBound( 10, -large );
+        tableau.setUpperBound( 10, large );
+        tableau.setLowerBound( 11, -large );
+        tableau.setUpperBound( 11, large );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds3( {
+            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
+
+            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
+            Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
+
+            Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ),
+            Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ),
+
+            Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ),
+            Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ),
+
+            Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ),
+            Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
+
+
+        // Invoke backward LP propagation
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        /*List<Tightening> expectedBounds4(
+            { Tightening( 4, -0.5, Tightening::LB ),
+              Tightening( 5, -0.4, Tightening::LB ),
+
+              Tightening( 8, -0.4571, Tightening::LB ),
+              Tightening( 9, -1.1057, Tightening::LB ),
+            } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );*/
+    }
+
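+    /*
+      The commented-out expectedBounds blocks in the preimage-approximation
+      test above are disabled: the pass is invoked, but its exact output is
+      not currently asserted on.
+    */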
+    bool boundsEqual( const List<Tightening> &bounds, const List<Tightening> &expectedBounds )
+    {
+        if ( bounds.size() != expectedBounds.size() )
+            return false;
+
+        bool allFound = true;
+        for ( const auto &bound : bounds )
+        {
+            bool currentFound = false;
+            for ( const auto &expectedBound : expectedBounds )
+            {
+                currentFound |=
+                    ( bound._type == expectedBound._type &&
+                      bound._variable == expectedBound._variable &&
+                      FloatUtils::areEqual( bound._value, expectedBound._value, 0.0001 ) );
+            }
+            allFound &= currentFound;
+        }
+        return allFound;
+    }
+
+    void updateTableau( MockTableau &tableau, List<Tightening> &tightenings )
+    {
+        for ( const auto &tightening : tightenings )
+        {
+            if ( tightening._type == Tightening::LB )
+            {
+                tableau.setLowerBound( tightening._variable, tightening._value );
+            }
+
+            if ( tightening._type == Tightening::UB )
+            {
+                tableau.setUpperBound( tightening._variable, tightening._value );
+            }
+        }
+    }
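+
+    /*
+      Illustrative sketch (hypothetical helper, not invoked by any test
+      above): one full round of the propagate-and-feed-back loop that each
+      test performs inline, using only calls that already appear in this file.
+    */
+    void runOneTighteningRound( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
+    {
+        List<Tightening> bounds;
+
+        // Forward DeepPoly pass over the current tableau bounds
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+
+        // Commit the discovered bounds, then tighten further via backward LP
+        updateTableau( tableau, bounds );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+    }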
">=" : "<=", _value ); - - for ( const auto &pair : _neuronToCoefficient ) - { - printf( "NeuronIndex: (layer %u, neuron %u), Coefficient: %.2lf )\n", - pair.first._layer, - pair.first._neuron, - pair.second ); - } - } -}; - -#endif // __PolygonalTightening_h__ - -// -// Local Variables: -// compile-command: "make -C ../.. " -// tags-file-name: "../../TAGS" -// c-basic-offset: 4 -// End: -// s From 24f2be1278c0a2ee7afeac8830e8bee33e33dc17 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 12 Dec 2024 20:56:07 +0200 Subject: [PATCH 30/81] Delete src/nlr/Test_NetworkLevelReasoner.h --- src/nlr/Test_NetworkLevelReasoner.h | 12348 -------------------------- 1 file changed, 12348 deletions(-) delete mode 100644 src/nlr/Test_NetworkLevelReasoner.h diff --git a/src/nlr/Test_NetworkLevelReasoner.h b/src/nlr/Test_NetworkLevelReasoner.h deleted file mode 100644 index 977faa4ed6..0000000000 --- a/src/nlr/Test_NetworkLevelReasoner.h +++ /dev/null @@ -1,12348 +0,0 @@ -/********************* */ -/*! \file Test_NetworkLevelReasoner.h - ** \verbatim - ** Top contributors (to current version): - ** Guy Katz, Andrew Wu - ** This file is part of the Marabou project. - ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS - ** in the top-level source directory) and their institutional affiliations. - ** All rights reserved. See the file COPYING in the top-level source - ** directory for licensing information.\endverbatim - ** - ** [[ Add lengthier description here ]] - -**/ - -#include "../../engine/tests/MockTableau.h" // TODO: fix this -#include "DeepPolySoftmaxElement.h" -#include "FloatUtils.h" -#include "Layer.h" -#include "NetworkLevelReasoner.h" -#include "Options.h" -#include "Query.h" -#include "Tightening.h" -#include "Vector.h" - -#include - -class MockForNetworkLevelReasoner -{ -public: -}; - -class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -{ -public: - MockForNetworkLevelReasoner *mock; - - void setUp() - { - TS_ASSERT( mock = new MockForNetworkLevelReasoner ); - } - - void tearDown() - { - TS_ASSERT_THROWS_NOTHING( delete mock ); - } - - void populateNetwork( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - 
nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithSigmoids( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::SIGMOID, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithAbs( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - 
nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithSign( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::SIGN, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - 
nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithRound( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ROUND, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Round sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithLeakyRelu( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - nlr.getLayer( 2 )->setAlpha( 0.1 ); - nlr.getLayer( 4 )->setAlpha( 0.1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the LeakyReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - 
nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - - void populateNetworkWithMax( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x b e - g - y c f - d - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::MAX, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::MAX, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, -2 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 2 ); - nlr.setWeight( 0, 1, 1, 3, -3 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 0, 2 ); - - // Mark the Max sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 3, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - } - - void populateNetworkWithSoftmax( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SOFTMAX, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 
0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Softmax sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 0, 2, 1 ); - nlr.addActivationSource( 1, 0, 2, 2 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 1, 2, 2 ); - nlr.addActivationSource( 1, 2, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 0, 4, 1 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithBilinear( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x b e - g - y c f - d - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::BILINEAR, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, -2 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 2 ); - nlr.setWeight( 0, 1, 1, 3, -3 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 0, 2 ); - - // Mark the Bilinear sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 3, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 5 ); - - nlr.setNeuronVariable( 
NLR::NeuronIndex( 2, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - } - - void populateNetworkWithAbsAndRelu( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -5 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Round/Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithRoundAndSign( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ROUND, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 
1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Round/Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithLeakyReluAndSigmoid( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - nlr.getLayer( 2 )->setAlpha( 0.1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the LeakyReLU/Sigmoid sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d - b f - y e - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 
); - nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::MAX, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Softmax/Max sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 0, 2, 1 ); - nlr.addActivationSource( 1, 0, 2, 2 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 1, 2, 2 ); - nlr.addActivationSource( 1, 2, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - } - - void populateNetworkWithReluAndBilinear( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d - b f - y e - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the ReLU/Bilinear sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( 
NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - } - - void populateNetworkSBTRelu( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - 2 R 1 - x0 --- x2 ---> x4 --- x6 - \ / / - 1 \ / / - \/ -1 / - /\ / - 3 / \ / - / \ R / - x1 --- x3 ---> x5 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::RELU, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 7 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - } - - void populateNetworkSBTReluResidual1( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - -1 - __________________ - / \ - / 1 R -1 1 R 3 1 - x0 --- x1 ---> x2 --- x3 ---> x4 --- x5 - \ / - \ 3 / - \________________________/ - - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 1 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 1 ); - nlr.addLayer( 2, NLR::Layer::RELU, 1 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - nlr.addLayer( 4, NLR::Layer::RELU, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - nlr.addLayerDependency( 0, 3 ); - nlr.addLayerDependency( 1, 5 ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 4, 0, 5, 0, 3 ); - nlr.setWeight( 0, 0, 3, 0, -1 ); - nlr.setWeight( 1, 0, 5, 0, 3 ); - - - nlr.setBias( 3, 0, 1 ); - nlr.setBias( 5, 0, 1 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 2 ); - 
nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 5 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 6 ); - tableau.setLowerBound( 1, -large ); - tableau.setUpperBound( 1, large ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - } - - void populateNetworkSBTReluResidual2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - -1 - __________________ - / \ - / 1 R -1 1 R 3 1 1 - x0 --- x1 ---> x2 --- x3 ---> x4 --- x5 --- x6 - \ / - \ 1 / - \_______________________________/ - - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 1 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 1 ); - nlr.addLayer( 2, NLR::Layer::RELU, 1 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - nlr.addLayer( 4, NLR::Layer::RELU, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - nlr.addLayer( 6, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 6; ++i ) - nlr.addLayerDependency( i - 1, i ); - nlr.addLayerDependency( 0, 3 ); - nlr.addLayerDependency( 0, 5 ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 4, 0, 5, 0, 3 ); - nlr.setWeight( 0, 0, 3, 0, -1 ); - nlr.setWeight( 0, 0, 5, 0, 1 ); - nlr.setWeight( 5, 0, 6, 0, 1 ); - - nlr.setBias( 3, 0, 1 ); - nlr.setBias( 5, 0, 1 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 7 ); - tableau.setLowerBound( 1, -large ); - tableau.setUpperBound( 1, large ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - } - - void populateNetworkSBTReluReindex( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 1 1 1 - x0 --- x2 x5 --- x6 x9 --- x10 - \ /\ /\ / \ / \ / - 1 \ / R\ /-1\ / R \ / 1 \ / - \/ \/ \/ \/ \/ - /\ /\ /\ /\ /\ - 1 / \ R/ \ 1/ \ R / \ 1 / \ - / \/ \/ \ / \ / 0 \ - x1 --- x3 x4 --- x7 x8 --- x11 - -1 1 - - The example described in Fig. 
3 of - https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::RELU, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 0 ); - - nlr.setBias( 5, 0, 1 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 1 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - - nlr.addActivationSource( 3, 0, 4, 1 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 8 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkSBTLeakyReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 LR 1 LR 1 1 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 1 \ / 0 \ / - \/ \/ \/ - /\ /\ /\ - 1 / \ 1 / \ 1 / \ - / \ LR / \ LR / 1 \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - -1 -1 - - The example described in Fig. 
3 of - https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf - using LeakyReLU activation instead of ReLU - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - nlr.getLayer( 2 )->setAlpha( 0.2 ); - nlr.getLayer( 4 )->setAlpha( 0.2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, 1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 0 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - - nlr.setBias( 5, 0, 1 ); - - // Mark the LeakyReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkSBTSigmoidsAndRound( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 S 1 Rd - x0 --- x2 ---> x4 --- x6 --- x8 - \ / \ / - 1 \ / 1 \ / - \/ \/ - /\ /\ - 1 / \ 1 / \ - / \ S / \ Rd - x1 --- x3 ---> x5 --- x7 --- x9 - -1 -1 - - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::SIGMOID, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 4; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // 
Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, 1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - // Mark the Sigmoid sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 10 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - } - - void populateNetworkSBTMax( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 R Max 2 - x0 --- x2 ---> x4 --- x6 ---> x7 - \ / / - 1 \ / / - \/ / - /\ / - 1 / \ / - / \ R / - x1 --- x3 ---> x5 - -1 - - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::RELU, 2 ); - nlr.addLayer( 3, NLR::Layer::MAX, 1 ); - nlr.addLayer( 4, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 4; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, -1 ); - nlr.setWeight( 3, 0, 4, 0, 2 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Mark the Max sources - nlr.addActivationSource( 2, 0, 3, 0 ); - nlr.addActivationSource( 2, 1, 3, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 7 ); - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 8 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, 
large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - } - - void populateNetworkSBTSoftmax( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - - x0 x3 S x6 - - x1 x4 S x7 - - x2 x5 S x8 - - x3 = x0 - x1 + x2 + 1 - x4 = -x0 + x1 + x2 + 2 - x5 = -x0 - x1 - x2 + 3 - - x6 x7 x8 = softmax(x3, x4, x5) - - x9 = x6 + x7 + x8 - x10 = -x6 - x7 - x8 - - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, -1 ); - nlr.setWeight( 0, 0, 1, 2, -1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 2, -1 ); - nlr.setWeight( 0, 2, 1, 0, 1 ); - nlr.setWeight( 0, 2, 1, 1, 1 ); - nlr.setWeight( 0, 2, 1, 2, -1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 1, 1, 2 ); - nlr.setBias( 1, 2, 3 ); - - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 0 ); - nlr.addActivationSource( 1, 0, 2, 1 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 0, 2, 2 ); - nlr.addActivationSource( 1, 1, 2, 2 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 8 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 11 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - } - - void populateNetworkSBTSoftmax2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - - x0 x3 S x8 - - x1 
x4 S x9 - - x2 x5 S x10 - - x6 S x11 - - x7 S x12 - - x3 = x0 - x1 + x2 + 1 - x4 = -x0 + x1 + x2 + 2 - x5 = -x0 - x1 - x2 + 3 - x6 = -x0 - x1 - x2 + 2 - x7 = -x0 - x1 - x2 + 1 - - x8 x10 x12 = softmax(x3, x5, x7) - - x9 x11 = softmax(x4, x6) - - x13 = x8 + x10 + x12 - x14 = -x8 - x10 - x12 - x15 = x9 + x11 - x16 = -x9 - x11 - - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 5 ); - nlr.addLayer( 2, NLR::Layer::SOFTMAX, 5 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 4 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, -1 ); - nlr.setWeight( 0, 0, 1, 2, -1 ); - nlr.setWeight( 0, 0, 1, 3, -1 ); - nlr.setWeight( 0, 0, 1, 4, -1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 2, -1 ); - nlr.setWeight( 0, 1, 1, 3, -1 ); - nlr.setWeight( 0, 1, 1, 4, -1 ); - nlr.setWeight( 0, 2, 1, 0, 1 ); - nlr.setWeight( 0, 2, 1, 1, 1 ); - nlr.setWeight( 0, 2, 1, 2, -1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - nlr.setWeight( 0, 2, 1, 4, -1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 4, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - nlr.setWeight( 2, 4, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 2, 1 ); - nlr.setWeight( 2, 3, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 3, -1 ); - nlr.setWeight( 2, 3, 3, 3, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 1, 1, 2 ); - nlr.setBias( 1, 2, 3 ); - nlr.setBias( 1, 3, 2 ); - nlr.setBias( 1, 4, 1 ); - - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 0 ); - nlr.addActivationSource( 1, 4, 2, 0 ); - nlr.addActivationSource( 1, 0, 2, 2 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 4, 2, 2 ); - nlr.addActivationSource( 1, 0, 2, 4 ); - nlr.addActivationSource( 1, 2, 2, 4 ); - nlr.addActivationSource( 1, 4, 2, 4 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 3, 2, 1 ); - nlr.addActivationSource( 1, 1, 2, 3 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 4 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 4 ), 12 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 13 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 3 ), 16 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 17 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - 
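An aside on the populateNetworkSBTSoftmax2 fixture being set up here: because each softmax group sums to 1, the downstream sums x13 = x8 + x10 + x12 and x15 = x9 + x11 are identically 1 (and x14, x16 identically -1) for every input, which is what makes this fixture's output bounds exactly computable. Below is a minimal standalone sketch of that invariant; it assumes nothing from Marabou's API, and the softmax helper is mine, using the standard max-shift for numerical stability.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Numerically stable softmax: shift by the max before exponentiating.
static std::vector<double> softmax( const std::vector<double> &z )
{
    double m = *std::max_element( z.begin(), z.end() );
    double sum = 0;
    std::vector<double> out( z.size() );
    for ( size_t i = 0; i < z.size(); ++i )
        sum += ( out[i] = std::exp( z[i] - m ) );
    for ( double &v : out )
        v /= sum;
    return out;
}

int main()
{
    double x0 = 0.5, x1 = -1, x2 = 2;

    // Weighted-sum layer of populateNetworkSBTSoftmax2
    double x3 = x0 - x1 + x2 + 1;
    double x4 = -x0 + x1 + x2 + 2;
    double x5 = -x0 - x1 - x2 + 3;
    double x6 = -x0 - x1 - x2 + 2;
    double x7 = -x0 - x1 - x2 + 1;

    // Two softmax groups: ( x3, x5, x7 ) and ( x4, x6 )
    std::vector<double> a = softmax( { x3, x5, x7 } ); // x8, x10, x12
    std::vector<double> b = softmax( { x4, x6 } );     // x9, x11

    // Each group sums to 1, so both printed values are 1 for any input
    printf( "x13 = %g, x15 = %g\n", a[0] + a[1] + a[2], b[0] + b[1] );
    return 0;
}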
tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - } - - void populateNetworkSBTBilinear( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - - x0 x2 - x x4 -- x5 - x1 x3 - - x2 = x0 - 2 * x1 - x3 = x0 + x1 - - x4 = x2 * x3 - x5 = -x4 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::BILINEAR, 1 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -2 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, -1 ); - - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 5 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 6 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - } - - void populateNetworkBackwardReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 R -1 R -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ R / \ R / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 2 of
https://dl.acm.org/doi/10.1145/3563325 - using ReLU activation instead of LeakyReLU - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::RELU, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardReLU2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = ReLU( x3 ) - x8 = ReLU( x4 ) - x9 = ReLU( x5 ) - x10 = ReLU( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = ReLU( x11 ) - x15 = ReLU( x12 ) - x16 = ReLU( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = ReLU( x17 ) - x20 = ReLU( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 
); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::RELU, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::RELU, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::RELU, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - 
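The formula block in the populateNetworkBackwardReLU2 comment is easy to mistranscribe into setWeight calls, so a throwaway forward-pass check is useful when reading the fixture. The following is a plain-C++ sketch of those comment equations, not Marabou code (the relu helper is mine); for input ( 1, 1, 1 ) it should print x21 = -13.

#include <algorithm>
#include <cstdio>

static double relu( double x )
{
    return std::max( x, 0.0 );
}

int main()
{
    double x0 = 1, x1 = 1, x2 = 1;

    double x3 = -x0 + x1, x4 = x0 + 2 * x1, x5 = x0 + x1 + x2, x6 = 3 * x0 - 2 * x1 - x2;
    double x7 = relu( x3 ), x8 = relu( x4 ), x9 = relu( x5 ), x10 = relu( x6 );

    double x11 = 2 * x7 + x9 - x10 + 2;
    double x12 = -x7 + 2 * x8 + x10;
    double x13 = x7 - x8 + 2 * x9 - 2;
    double x14 = relu( x11 ), x15 = relu( x12 ), x16 = relu( x13 );

    double x17 = -x14 + x15 - x16, x18 = x14 + x15 + x16;
    double x19 = relu( x17 ), x20 = relu( x18 );

    printf( "x21 = %g\n", x19 - x20 - 1 ); // expect -13 for input ( 1, 1, 1 )
    return 0;
}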
tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardSigmoid( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 S -1 S -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ S / \ S / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 2 of - https://dl.acm.org/doi/10.1145/3563325 - using Sigmoid activation instead of LeakyReLU - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::SIGMOID, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the Sigmoid sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - 
double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardSigmoid2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = Sigmoid( x3 ) - x8 = Sigmoid( x4 ) - x9 = Sigmoid( x5 ) - x10 = Sigmoid( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = Sigmoid( x11 ) - x15 = Sigmoid( x12 ) - x16 = Sigmoid( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = Sigmoid( x17 ) - x20 = Sigmoid( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::SIGMOID, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::SIGMOID, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::SIGMOID, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the Sigmoid sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - 
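For reference while reading the two sigmoid fixtures: the activation they apply is s( x ) = 1 / ( 1 + e^-x ), and since it is strictly increasing, a concrete interval [l, u] on the pre-activation maps exactly to [s( l ), s( u )]. Below is a small generic sketch of this, not Marabou's FloatUtils implementation, written the usual way to avoid overflowing exp() for large negative inputs.

#include <cmath>
#include <cstdio>

// Evaluate the sigmoid without overflowing exp() for large |x|.
static double sigmoid( double x )
{
    if ( x >= 0 )
        return 1 / ( 1 + std::exp( -x ) );
    double e = std::exp( x );
    return e / ( 1 + e );
}

int main()
{
    // Sigmoid is monotonically increasing, so an interval [l, u] on the
    // pre-activation maps exactly to [sigmoid( l ), sigmoid( u )].
    double l = -2, u = 3;
    printf( "[%g, %g] -> [%g, %g]\n", l, u, sigmoid( l ), sigmoid( u ) );
    return 0;
}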
nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardSign( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 Sign -1 Sign -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ Sign / \ Sign / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 
2 of - https://dl.acm.org/doi/10.1145/3563325 - using Sign activation instead of LeakyReLU - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardSign2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = Sign( x3 ) - x8 = Sign( x4 ) - x9 = Sign( x5 ) - x10 = Sign( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = Sign( x11 ) - x15 = Sign( x12 ) - x16 = Sign( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = Sign( x17 ) - x20 = Sign( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, 
NLR::Layer::SIGN, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::SIGN, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - 
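Sign, unlike the activations above, is a step function, so interval propagation through it has only three outcomes. A sketch follows, assuming the convention sign( x ) = +1 for x >= 0 and -1 otherwise (my reading of Marabou's Sign constraint; if ties at zero are handled differently, only that boundary case changes).

#include <cstdio>

// Interval image of the Sign activation, assuming sign( x ) = +1 for
// x >= 0 and -1 for x < 0. An input interval maps to one of three cases.
static void signBounds( double l, double u, double &outL, double &outU )
{
    if ( l >= 0 )
    {
        outL = 1; // input always non-negative
        outU = 1;
    }
    else if ( u < 0 )
    {
        outL = -1; // input always negative
        outU = -1;
    }
    else
    {
        outL = -1; // interval straddles zero
        outU = 1;
    }
}

int main()
{
    double l, u;
    signBounds( -3, 2, l, u );
    printf( "[-3, 2] -> [%g, %g]\n", l, u ); // [-1, 1]
    signBounds( 0.5, 4, l, u );
    printf( "[0.5, 4] -> [%g, %g]\n", l, u ); // [1, 1]
    return 0;
}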
tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardRound( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 Rnd -1 Rnd -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ Rnd / \ Rnd / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 2 of - https://dl.acm.org/doi/10.1145/3563325 - using Round activation instead of LeakyReLU - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ROUND, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the Round sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - 
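Round is monotonically non-decreasing, so concrete bounds propagate simply as [round( l ), round( u )]. The sketch below uses std::nearbyint, which in the default rounding mode rounds ties to even, matching ONNX's Round; whether Marabou's Round layer breaks ties the same way is an assumption here, and only the exact-half cases depend on it.

#include <cmath>
#include <cstdio>

int main()
{
    // Round is monotonically non-decreasing, so an interval [l, u] on the
    // pre-activation maps to [round( l ), round( u )]. std::nearbyint rounds
    // half-to-even in the default mode ( 0.5 -> 0, 1.5 -> 2, 2.5 -> 2 ).
    double l = -1.2, u = 2.5;
    printf( "[%g, %g] -> [%g, %g]\n", l, u, std::nearbyint( l ), std::nearbyint( u ) );
    return 0;
}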
tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardRound2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = Round( x3 ) - x8 = Round( x4 ) - x9 = Round( x5 ) - x10 = Round( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = Round( x11 ) - x15 = Round( x12 ) - x16 = Round( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = Round( x17 ) - x20 = Round( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::ROUND, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::ROUND, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::ROUND, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the Round sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); 
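All of these fixtures start every non-input variable at +/-1000000 and rely on bound propagation to tighten them. For a weighted-sum neuron the basic step is plain interval arithmetic, splitting each weight by sign. Here is a generic sketch of that step, not Marabou's implementation, applied to the comment's x11 = 2*x7 + x9 - x10 + 2 with hypothetical source ranges of [0, 3].

#include <cstdio>

// Concrete interval propagation through one weighted-sum neuron:
// out = sum_i w[i] * in_i + bias, where in_i ranges over [l[i], u[i]].
// A positive weight pulls from the same-side bound, a negative weight
// from the opposite side.
static void affineBounds( const double *w,
                          const double *l,
                          const double *u,
                          unsigned n,
                          double bias,
                          double &outL,
                          double &outU )
{
    outL = outU = bias;
    for ( unsigned i = 0; i < n; ++i )
    {
        outL += w[i] > 0 ? w[i] * l[i] : w[i] * u[i];
        outU += w[i] > 0 ? w[i] * u[i] : w[i] * l[i];
    }
}

int main()
{
    // x11 = 2*x7 + x9 - x10 + 2, with x7, x9, x10 each in [0, 3] (say)
    double w[] = { 2, 1, -1 }, l[] = { 0, 0, 0 }, u[] = { 3, 3, 3 };
    double outL, outU;
    affineBounds( w, l, u, 3, 2, outL, outU );
    printf( "x11 in [%g, %g]\n", outL, outU ); // [-1, 11]
    return 0;
}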
- - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardAbs( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 A -1 A -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ A / \ A / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 
2 of - https://dl.acm.org/doi/10.1145/3563325 - using Absolute value activation instead of LeakyReLU - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardAbs2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = Abs( x3 ) - x8 = Abs( x4 ) - x9 = Abs( x5 ) - x10 = Abs( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = Abs( x11 ) - x15 = Abs( x12 ) - x16 = Abs( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = Abs( x17 ) - x20 = Abs( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - 
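Absolute value is the first activation in this group that is non-monotone, so its interval image needs a three-way case split rather than a simple endpoint map. A minimal sketch of that split, in generic C++ rather than Marabou's abstraction:

#include <algorithm>
#include <cstdio>

// Interval image of the absolute-value activation.
static void absBounds( double l, double u, double &outL, double &outU )
{
    if ( l >= 0 )
    {
        outL = l; // already non-negative
        outU = u;
    }
    else if ( u <= 0 )
    {
        outL = -u; // mirror a purely negative interval
        outU = -l;
    }
    else
    {
        outL = 0; // interval straddles zero
        outU = std::max( -l, u );
    }
}

int main()
{
    double l, u;
    absBounds( -3, 2, l, u );
    printf( "[-3, 2] -> [%g, %g]\n", l, u ); // [0, 3]
    return 0;
}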
nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, 
-large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardLeakyReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 LR -1 LR -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ LR / \ LR / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 2 of - https://dl.acm.org/doi/10.1145/3563325 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - nlr.getLayer( 2 )->setAlpha( 0.1 ); - nlr.getLayer( 4 )->setAlpha( 0.1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the LeakyReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for 
neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardLeakyRelu2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = LeakyReLU( x3 ) - x8 = LeakyReLU( x4 ) - x9 = LeakyReLU( x5 ) - x10 = LeakyReLU( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = LeakyReLU( x11 ) - x15 = LeakyReLU( x12 ) - x16 = LeakyReLU( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = LeakyReLU( x17 ) - x20 = LeakyReLU( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::LEAKY_RELU, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - nlr.getLayer( 2 )->setAlpha( 0.1 ); - nlr.getLayer( 4 )->setAlpha( 0.1 ); - nlr.getLayer( 6 )->setAlpha( 0.1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the LeakyReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - 
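// Worked forward pass for the equations in the header comment above - an
// illustration only, not part of the original test. With alpha = 0.1 and
// input ( x0, x1, x2 ) = ( 1, 0, -1 ):
//   x3 = -1 -> x7 = -0.1,   x4 = 1 -> x8 = 1,
//   x5 = 0  -> x9 = 0,      x6 = 4 -> x10 = 4
//   x11 = 2*(-0.1) + 0 - 4 + 2 = -2.2 -> x14 = -0.22
//   x12 = 0.1 + 2 + 4 = 6.1           -> x15 = 6.1
//   x13 = -0.1 - 1 + 0 - 2 = -3.1     -> x16 = -0.31
//   x17 = 0.22 + 6.1 + 0.31 = 6.63    -> x19 = 6.63
//   x18 = -0.22 + 6.1 - 0.31 = 5.57   -> x20 = 5.57
//   x21 = 6.63 - 5.57 - 1 = 0.06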
nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr, - MockTableau &tableau ) - { - /* - a a' - x e - b b' f h - y d - c c' - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::MAX, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the 
weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Softmax/Max sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 0, 2, 1 ); - nlr.addActivationSource( 1, 0, 2, 2 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 1, 2, 2 ); - nlr.addActivationSource( 1, 2, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - - void populateNetworkBackwardSoftmaxAndMax2( NLR::NetworkLevelReasoner &nlr, - MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 - x1 x12 x15 x17 - x5 x9 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7, x8, x9, x10 = Softmax( x3, x4, x5, x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14, x15, x16 = Softmax( x11, x12, x13 ) - - x17 = Max( x14, x15, x16 ) - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::SOFTMAX, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::SOFTMAX, 3 ); - nlr.addLayer( 5, NLR::Layer::MAX, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 );
- nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - - // Mark the Softmax/Max sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 0 ); - nlr.addActivationSource( 1, 3, 2, 0 ); - nlr.addActivationSource( 1, 0, 2, 1 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 3, 2, 1 ); - nlr.addActivationSource( 1, 0, 2, 2 ); - nlr.addActivationSource( 1, 1, 2, 2 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 2 ); - nlr.addActivationSource( 1, 0, 2, 3 ); - nlr.addActivationSource( 1, 1, 2, 3 ); - nlr.addActivationSource( 1, 2, 2, 3 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - nlr.addActivationSource( 3, 2, 4, 0 ); - nlr.addActivationSource( 3, 0, 4, 1 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 1 ); - nlr.addActivationSource( 3, 0, 4, 2 ); - nlr.addActivationSource( 3, 1, 4, 2 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 4, 0, 5, 0 ); - nlr.addActivationSource( 4, 1, 5, 0 ); - nlr.addActivationSource( 4, 2, 5, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 18 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 
10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - } - - void populateNetworkBackwardReluAndBilinear( NLR::NetworkLevelReasoner &nlr, - MockTableau &tableau ) - { - /* - a a' - x e - b b' f h - y d - c c' - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the ReLU/Bilinear sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardReluAndBilinear2( 
NLR::NetworkLevelReasoner &nlr, - MockTableau &tableau ) - { - /* - x3 x7 - x0 - x4 x8 x11 x13 - x1 x15 - x5 x9 x12 x14 - x2 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = ReLU( x3 ) - x8 = ReLU( x4 ) - x9 = ReLU( x5 ) - x10 = ReLU( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - - x13 = ReLU( x11 ) - x14 = ReLU( x12 ) - - x15 = x13 * x14 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::RELU, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::BILINEAR, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setBias( 3, 0, 2 ); - - // Mark the ReLU/Bilinear sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - nlr.addActivationSource( 4, 0, 5, 0 ); - nlr.addActivationSource( 4, 1, 5, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 13 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 14 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 15 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 16 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 
11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - } - - void test_evaluate_relu() - { - NLR::NetworkLevelReasoner nlr; - - populateNetwork( nlr ); - - double input[2]; - double output[2]; - - // With ReLUs, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - - // With ReLUs, case 1 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 1 ) ); - - // With ReLUs, case 2 - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 0 ) ); - } - - void test_evaluate_sigmoids() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSigmoids( nlr ); - - double input[2]; - double output[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0.6750, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 3.0167, 0.0001 ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0.6032, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.5790, 0.0001 ) ); - - // case 3 - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0.5045, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.1957, 0.0001 ) ); - } - - void test_evaluate_abs() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithAbs( nlr ); - - double input[2]; - double output[2]; - - // With Abs, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - - // With Abs, case 1 - input[0] = -2; - input[1] = -2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - - // With Abs, case 2 - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 10 ) ); - } - - void test_evaluate_sign() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSign( nlr ); - - double input[2]; - double output[2]; - - // With Sign, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - - // With Sign, case 1 - input[0] = -2; - input[1] = -2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) 
); - - // With Sign, case 2 (0 considered "non-negative", sign(0)=1) - input[0] = -1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], -1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -4 ) ); - } - - void test_evaluate_round() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithRound( nlr ); - - double input[2]; - double output[2]; - - // With Round, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - - // With Round, case 1 - input[0] = 2.1; - input[1] = 1.4; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 2, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); - - // With Round, case 2 - input[0] = 2.1; - input[1] = 1.6; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -12, 0.0001 ) ); - } - - void test_evaluate_leaky_relu() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithLeakyRelu( nlr ); - - double input[2]; - double output[2]; - - // With Leaky ReLU, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - - // With Leaky ReLU, case 1 (alpha=0.1) - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0.9, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 0.57, 0.0001 ) ); - - // With Leaky ReLU, case 2 - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], -0.04, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -0.76, 0.0001 ) ); - } - - void test_evaluate_max() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithMax( nlr ); - - double input[2]; - double output[1]; - - // With Max, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], -3 ) ); - - // With Max, case 1 - input[0] = 1; - input[1] = -3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], -18 ) ); - - // With Max, case 2 - input[0] = -3; - input[1] = 3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], -5 ) ); - } - - - void test_evaluate_softmax() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSoftmax( nlr ); - - double input[2]; - double output[2]; - - // With Softmax, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.2999, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.4001, 0.0001 ) ); - - // With Softmax, case 1 - input[0] = 1; - input[1] = -3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.1192, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.7615, 0.0001 ) ); - - // With Softmax, 
case 2 - input[0] = -3; - input[1] = 3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.1206, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.7588, 0.0001 ) ); - } - - void test_evaluate_bilinear() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithBilinear( nlr ); - - double input[2]; - double output[1]; - - // With Bilinear, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); - - // With Bilinear, case 1 - input[0] = 0.1; - input[1] = -0.3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 2.8304, 0.0001 ) ); - - // With Bilinear, case 2 - input[0] = -0.3; - input[1] = 0.3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.0912, 0.0001 ) ); - } - - void test_evaluate_non_consecutive_layers() - { - NLR::NetworkLevelReasoner nlr; - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - nlr.addLayerDependency( 0, 1 ); - nlr.addLayerDependency( 1, 2 ); - nlr.addLayerDependency( 2, 3 ); - nlr.addLayerDependency( 0, 3 ); - nlr.addLayerDependency( 3, 4 ); - nlr.addLayerDependency( 0, 4 ); - nlr.addLayerDependency( 4, 5 ); - - // Set the weights and relus - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, 2 ); - nlr.setWeight( 2, 2, 3, 1, -2 ); - nlr.setWeight( 0, 1, 3, 1, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 0, 0, 4, 2 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 2, 5, 0, 1 ); - - // Evaluate - double input[2]; - double output; - - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); - TS_ASSERT( FloatUtils::areEqual( output, 2 ) ); - - input[0] = -1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); - TS_ASSERT( FloatUtils::areEqual( output, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.computeSuccessorLayers() ); - - TS_ASSERT_EQUALS( nlr.getLayer( 0 )->getSuccessorLayers(), Set<unsigned>( { 1, 3, 4 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 1 )->getSuccessorLayers(), Set<unsigned>( { 2 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 2 )->getSuccessorLayers(), Set<unsigned>( { 3 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 3 )->getSuccessorLayers(), Set<unsigned>( { 4 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getSuccessorLayers(), Set<unsigned>( { 5 } ) ); - } - - void test_evaluate_abs_and_relu() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithAbsAndRelu( nlr ); - - double input[2]; - double output[2]; - - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); - - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( 
nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - } - - void test_evaluate_round_and_sign() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithRoundAndSign( nlr ); - - double input[2]; - double output[2]; - - input[0] = 1.6; - input[1] = 1.4; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -2, 0.0001 ) ); - - input[0] = 1.6; - input[1] = 1.6; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], -1, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); - } - - void test_evaluate_leaky_relu_and_sigmoid() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithLeakyReluAndSigmoid( nlr ); - - double input[2]; - double output[2]; - - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0.7109, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 1.4602, 0.0001 ) ); - - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0.4013, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 0.6508, 0.0001 ) ); - } - - void test_evaluate_softmax_and_max() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSoftmaxAndMax( nlr ); - - double input[2]; - double output[1]; - - input[0] = 1; - input[1] = -3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], -2.9998, 0.0001 ) ); - - input[0] = -3; - input[1] = 3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], -1.0000, 0.0001 ) ); - } - - void test_evaluate_relu_and_bilinear() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithReluAndBilinear( nlr ); - - double input[2]; - double output[1]; - - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); - } - - void test_store_into_other() - { - NLR::NetworkLevelReasoner nlr; - - populateNetwork( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - // With ReLUs, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // With ReLUs, case 1 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 
) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_sigmoids() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSigmoids( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_round() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithRound( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_sign() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSign( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_abs() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithAbs( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( 
FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_leaky_relu() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithLeakyRelu( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_max() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithMax( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_softmax() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSoftmax( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_bilinear() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithBilinear( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( 
FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_generate_input_query() - { - NLR::NetworkLevelReasoner nlr; - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Variable indexing - - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - - // Set the weights and biases for the weighted sum layers - - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -5 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 1, 1, 0 ); - nlr.setBias( 1, 2, 0 ); - - nlr.setBias( 3, 0, 0 ); - nlr.setBias( 3, 1, 2 ); - - nlr.setBias( 5, 0, 0 ); - nlr.setBias( 5, 1, 0 ); - - // Mark the ReLU/Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Start the testing - Query ipq; - nlr.generateQuery( ipq ); - List<Equation> unhandledEquations; - Set<unsigned> varsInUnhandledConstraints; - TS_ASSERT( - ipq.constructNetworkLevelReasoner( unhandledEquations, varsInUnhandledConstraints ) ); - NLR::NetworkLevelReasoner *reconstructedNlr = ipq.getNetworkLevelReasoner(); - - double input[2]; - double output[2]; - - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); - - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - } - - void test_simulate_relu() - { - NLR::NetworkLevelReasoner nlr; - - populateNetwork( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With ReLUs, Inputs are zeros, only biases count - Vector<Vector<double>> simulations1; - simulations1.append( Vector<double>( simulationSize, 0 ) ); - simulations1.append( Vector<double>( simulationSize, 0 ) ); -
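// A note on the simulation layout (inferred from how these tests use the
// API, rather than stated anywhere): `simulations1` holds one Vector<double>
// per input neuron, each of length simulationSize, so column i across the
// inner vectors forms a single input sample. After nlr.simulate(),
// Layer::getSimulations() exposes the same per-neuron layout for each layer,
// which is why the assertions below read sample i of output neuron n as
// ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ).get( n ).get( i ).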
TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } - - // With ReLUs, case 1 - Vector<Vector<double>> simulations2; - simulations2.append( Vector<double>( simulationSize, 1 ) ); - simulations2.append( Vector<double>( simulationSize, 1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 1 ) ); - } - - // With ReLUs, case 2 - Vector<Vector<double>> simulations3; - simulations3.append( Vector<double>( simulationSize, 1 ) ); - simulations3.append( Vector<double>( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 0 ) ); - } - } - - void test_simulate_sigmoids() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSigmoids( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // case 1 - Vector<Vector<double>> simulations1; - simulations1.append( Vector<double>( simulationSize, 0 ) ); - simulations1.append( Vector<double>( simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.6750, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 3.0167, - 0.0001 ) ); - } - - // case 2 - Vector<Vector<double>> simulations2; - simulations2.append( Vector<double>( simulationSize, 1 ) ); - simulations2.append( Vector<double>( simulationSize, 1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.6032, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2.5790, - 0.0001 ) ); - } - - // case 3 - Vector<Vector<double>> simulations3; - simulations3.append( Vector<double>( simulationSize, 1 ) ); - simulations3.append( Vector<double>( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.5045, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2.1957, - 0.0001 ) ); - } - } - - void test_simulate_round() - { - 
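// The two cases below differ only in the second input ( 1.4 vs. 1.6 ),
// straddling the rounding threshold at 1.5; presumably this is what flips
// the rounded activations, giving outputs ( 2, -4 ) vs. ( 0, -12 ),
// mirroring test_evaluate_round above.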
NLR::NetworkLevelReasoner nlr; - - populateNetworkWithRound( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With Round, inputs are zeros, only biases count - Vector<Vector<double>> simulations1; - simulations1.append( Vector<double>( simulationSize, 0 ) ); - simulations1.append( Vector<double>( simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } - - // With Round, case 1 - Vector<Vector<double>> simulations2; - simulations2.append( Vector<double>( simulationSize, 2.1 ) ); - simulations2.append( Vector<double>( simulationSize, 1.4 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 2, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -4, - 0.0001 ) ); - } - - // With Round, case 2 - Vector<Vector<double>> simulations3; - simulations3.append( Vector<double>( simulationSize, 2.1 ) ); - simulations3.append( Vector<double>( simulationSize, 1.6 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -12, - 0.0001 ) ); - } - } - - void test_simulate_sign() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSign( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With Sign, inputs are zeros, only biases count - Vector<Vector<double>> simulations1; - simulations1.append( Vector<double>( simulationSize, 0 ) ); - simulations1.append( Vector<double>( simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } - - // With Sign, case 1 - Vector<Vector<double>> simulations2; - simulations2.append( Vector<double>( simulationSize, -2 ) ); - simulations2.append( Vector<double>( simulationSize, -2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } - - // With Sign, case 2 - Vector<Vector<double>> simulations3; - simulations3.append( Vector<double>( simulationSize, -1 ) ); - simulations3.append( Vector<double>( simulationSize, 1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -4 ) ); - } - } - - void test_simulate_abs() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithAbs( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With Abs, inputs are zeros, only biases count - Vector<Vector<double>> simulations1; - simulations1.append( Vector<double>( simulationSize, 0 ) ); - simulations1.append( Vector<double>( simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } - - // With Abs, case 1 - Vector<Vector<double>> simulations2; - simulations2.append( Vector<double>( simulationSize, -2 ) ); - simulations2.append( Vector<double>( simulationSize, -2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } - - // With Abs, case 2 - Vector<Vector<double>> simulations3; - simulations3.append( Vector<double>( simulationSize, 1 ) ); - simulations3.append( Vector<double>( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 4 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 10 ) ); - } - } - - void test_simulate_leaky_relu() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithLeakyRelu( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With Leaky ReLU, inputs are zeros, only biases count - Vector<Vector<double>> simulations1; - simulations1.append( Vector<double>( simulationSize, 0 ) ); - simulations1.append( Vector<double>( simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } - - // With Leaky ReLU, case 1 (alpha=0.1) - Vector<Vector<double>> simulations2; - simulations2.append( Vector<double>( simulationSize, 1 ) ); - simulations2.append( Vector<double>( simulationSize, 1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.9, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 0.57, - 0.0001 ) ); - } - - // With Leaky ReLU, case 2 - Vector<Vector<double>> simulations3; - simulations3.append( Vector<double>( simulationSize, 1 ) ); - simulations3.append( Vector<double>( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -0.04, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -0.76, - 0.0001 ) ); - } - } - - void test_simulate_max() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithMax( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With Max, inputs are zeros, only biases count - Vector<Vector<double>> simulations1; - simulations1.append( Vector<double>( simulationSize, 0 ) ); - simulations1.append( Vector<double>( simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -3 ) ); - } - - // With Max, case 1 - Vector<Vector<double>> simulations2; - simulations2.append( Vector<double>( simulationSize, 1 ) ); - simulations2.append( Vector<double>( simulationSize, -3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -18 ) ); - } - - // With Max, case 2 - Vector<Vector<double>> simulations3; - simulations3.append( Vector<double>( simulationSize, -3 ) ); - simulations3.append( Vector<double>( simulationSize, 3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -5 ) ); - } - } - - void test_simulate_softmax() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSoftmax( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With Softmax, inputs are zeros, only biases count - Vector<Vector<double>> simulations1; - simulations1.append( Vector<double>( simulationSize, 0 ) ); - simulations1.append( Vector<double>( simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.2999, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2.4001, - 0.0001 ) ); - } - - // With Softmax, case 1 - Vector<Vector<double>> simulations2; - simulations2.append( Vector<double>( simulationSize, 1 ) ); - simulations2.append( Vector<double>( simulationSize, -3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.1192, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2.7615, - 0.0001 ) ); - } - - // With Softmax, case 2 - Vector<Vector<double>> simulations3; - simulations3.append( Vector<double>( simulationSize, -3 ) ); - simulations3.append( Vector<double>( simulationSize, 3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.1206, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2.7588, - 0.0001 ) ); - } - } - - void test_simulate_bilinear() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithBilinear( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With Bilinear, inputs are zeros, only biases count - Vector<Vector<double>> simulations1; - simulations1.append( Vector<double>( simulationSize, 0 ) ); - simulations1.append( Vector<double>( simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0 ) ); - } - - // With Bilinear, case 1 - Vector<Vector<double>> simulations2; - simulations2.append( Vector<double>( simulationSize, 0.1 ) ); - simulations2.append( Vector<double>( simulationSize, -0.3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 2.8304, - 0.0001 ) ); - } - - // With Bilinear, case 2 - Vector<Vector<double>> simulations3; - simulations3.append( Vector<double>( simulationSize, -0.3 ) ); - simulations3.append( Vector<double>( simulationSize, 0.3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.0912, - 0.0001 ) ); - } - } - - void test_simulate_non_consecutive_layers() - { - NLR::NetworkLevelReasoner nlr; - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - nlr.addLayerDependency( 0, 1 ); - nlr.addLayerDependency( 1, 2 ); - nlr.addLayerDependency( 2, 3 ); - nlr.addLayerDependency( 0, 3 ); - nlr.addLayerDependency( 3, 4 ); - nlr.addLayerDependency( 0, 4 ); - nlr.addLayerDependency( 4, 5 ); - - // Set the weights and mark the ReLU sources - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, 2 ); - nlr.setWeight( 2, 2, 3, 1, -2 ); - nlr.setWeight( 0, 1, 3, 1, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 0, 0, 4, 2 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 2, 5, 0, 1 ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // Simulate1 - Vector<Vector<double>> simulations1; - simulations1.append( Vector<double>( simulationSize, 1 ) ); - simulations1.append( Vector<double>( simulationSize, 1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 2 ) ); - - // Simulate2 - Vector<Vector<double>> simulations2; - simulations2.append( Vector<double>( simulationSize, -1 ) ); - simulations2.append( Vector<double>( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0 ) ); - } - - void test_simulate_abs_and_relu() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithAbsAndRelu( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // Simulate1 - Vector<Vector<double>> simulations1; - simulations1.append( Vector<double>( simulationSize, 1 ) ); - simulations1.append( Vector<double>( simulationSize, 1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 2 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2 ) ); - } - - // Simulate2 - Vector<Vector<double>> simulations2; - simulations2.append( Vector<double>( simulationSize, 1 ) ); - simulations2.append( Vector<double>( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 4 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } - } - - void test_simulate_round_and_sign() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithRoundAndSign( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With Round/Sign, case 1 - Vector<Vector<double>> simulations1; - simulations1.append( Vector<double>( simulationSize, 1.6 ) ); - simulations1.append( Vector<double>( simulationSize, 1.4 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -2 ) ); - } - - // With Round/Sign, case 2 - Vector<Vector<double>> simulations2; - simulations2.append( Vector<double>( simulationSize, 1.6 ) ); - simulations2.append( Vector<double>( simulationSize, 1.6 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -4 ) ); - } - }
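The expected outputs in the simulate tests above are hand-computed forward passes: each WEIGHTED_SUM layer evaluates w . x + b, and each activation layer is applied element-wise. A minimal standalone reference sketch of the element-wise activations these tests exercise (plain C++, not the Marabou API; the sign-at-zero and rounding-tie conventions below are assumptions that should be checked against Marabou's own semantics):

    #include <algorithm>
    #include <cmath>

    double refRelu( double x ) { return std::max( x, 0.0 ); }         // ReLU
    double refLeakyRelu( double x ) { return x >= 0 ? x : 0.1 * x; }  // slope 0.1, as in the alpha=0.1 case above
    double refAbs( double x ) { return std::fabs( x ); }              // absolute value
    double refSign( double x ) { return x >= 0 ? 1.0 : -1.0; }        // assuming sign( 0 ) = +1
    double refRound( double x ) { return std::nearbyint( x ); }       // assuming round-half-to-even tie breaking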
void test_simulate_leaky_relu_and_sigmoid() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithLeakyReluAndSigmoid( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With LeakyReLU/Sigmoid, case 1 - Vector<Vector<double>> simulations1; - simulations1.append( Vector<double>( simulationSize, 1 ) ); - simulations1.append( Vector<double>( simulationSize, 1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.7109, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 1.4602, - 0.0001 ) ); - } - - // With LeakyReLU/Sigmoid, case 2 - Vector<Vector<double>> simulations2; - simulations2.append( Vector<double>( simulationSize, 1 ) ); - simulations2.append( Vector<double>( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.4013, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 0.6508, - 0.0001 ) ); - } - } - - void test_simulate_softmax_and_max() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSoftmaxAndMax( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With Softmax/Max, case 1 - Vector<Vector<double>> simulations1; - simulations1.append( Vector<double>( simulationSize, 1 ) ); - simulations1.append( Vector<double>( simulationSize, -3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -2.9998, - 0.0001 ) ); - } - - // With Softmax/Max, case 2 - Vector<Vector<double>> simulations2; - simulations2.append( Vector<double>( simulationSize, -3 ) ); - simulations2.append( Vector<double>( simulationSize, 3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -1, - 0.0001 ) ); - } - } - - void test_simulate_relu_and_bilinear() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithReluAndBilinear( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With ReLU/Bilinear, case 1 - Vector<Vector<double>> simulations1; - simulations1.append( Vector<double>( simulationSize, 1 ) ); - simulations1.append( Vector<double>( simulationSize, 1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - } - - // With ReLU/Bilinear, case 2 - Vector<Vector<double>> simulations2; - simulations2.append( Vector<double>( simulationSize, 1 ) ); - simulations2.append( Vector<double>( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0 ) ); - } - }
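The interval-arithmetic tests that follow check one bound-propagation pass, layer by layer. For a weighted-sum neuron y = sum_i w_i * x_i + b with each x_i in [l_i, u_i], the propagated interval takes each term's worst case according to the sign of the weight, and a ReLU then clamps the result to [max(l, 0), max(u, 0)]. A minimal sketch of the weighted-sum rule (hypothetical helper for illustration, not the NetworkLevelReasoner implementation):

    #include <utility>
    #include <vector>

    // Interval of w . x + bias, given per-input bounds [lb[i], ub[i]].
    std::pair<double, double> weightedSumInterval( const std::vector<double> &w,
                                                   const std::vector<double> &lb,
                                                   const std::vector<double> &ub,
                                                   double bias )
    {
        double lo = bias, hi = bias;
        for ( unsigned i = 0; i < w.size(); ++i )
        {
            lo += w[i] * ( w[i] >= 0 ? lb[i] : ub[i] ); // worst case per weight sign
            hi += w[i] * ( w[i] >= 0 ? ub[i] : lb[i] );
        }
        return { lo, hi };
    }

For example, with x0, x1 in [-1, 1], the neuron x4 = 2*x0 - 3*x1 gets the interval [-5, 5], and its ReLU x5 gets [0, 5], matching the first expected tightenings below.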
void test_interval_arithmetic_bound_propagation_relu_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetwork( nlr ); - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List<Tightening> expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), - - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), - } ); - - List<Tightening> bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List<Tightening> expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), - - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_abs_constraints() - { - NLR::NetworkLevelReasoner nlr; - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the absolute value sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Layer dependencies - nlr.addLayerDependency( 0, 1 ); - nlr.addLayerDependency( 1, 2 ); - nlr.addLayerDependency( 2, 3 ); - nlr.addLayerDependency( 3, 4 ); - nlr.addLayerDependency( 4, 5 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 2 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List<Tightening> expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 8, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), - - Tightening( 10, -3, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), - - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 41, Tightening::UB ), - } ); - - List<Tightening> bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large );
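// The absolute-value tightenings expected below follow the usual interval
// rule: for x in [l, u], |x| lies in [max( 0, l, -u ), max( -l, u )]
// (a sketch of the rule, not necessarily how the layer implements it).
// For instance, x4 in [-12, 5] yields x5 = |x4| in [0, 12], exactly the
// pair of tightenings expected for variable 5 further down.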
tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), - - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_sign_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithSign( nlr ); - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 1, Tightening::LB ), Tightening( 3, 1, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 
1, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 3, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), - - Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the current bounds - tableau.setLowerBound( 0, 3 ); - tableau.setUpperBound( 0, 4 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds2( { - Tightening( 2, 4, Tightening::LB ), Tightening( 2, 5, Tightening::UB ), - Tightening( 3, 1, Tightening::LB ), Tightening( 3, 1, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 11, Tightening::UB ), - Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, 1, Tightening::LB ), Tightening( 10, 3, Tightening::UB ), - Tightening( 11, 1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), - - Tightening( 12, 1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, 4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_leaky_relu_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithLeakyRelu( nlr ); - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 2 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 
4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), - Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), - Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ), - Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), - - Tightening( 9, -0.28, Tightening::LB ), Tightening( 9, 10.1, Tightening::UB ), - Tightening( 11, -0.38, Tightening::LB ), Tightening( 11, 9.1, Tightening::UB ), - - Tightening( 12, -0.28, Tightening::LB ), Tightening( 12, 10.1, Tightening::UB ), - Tightening( 13, -1.42, Tightening::LB ), Tightening( 13, 37.4, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), 
Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, -0.2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 7.1, Tightening::UB ), - Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), - - Tightening( 9, -0.34, Tightening::LB ), Tightening( 9, 7.1, Tightening::UB ), - Tightening( 11, -0.32, Tightening::LB ), Tightening( 11, 7.3, Tightening::UB ), - - Tightening( 12, -0.34, Tightening::LB ), Tightening( 12, 7.1, Tightening::UB ), - Tightening( 13, -1.3, Tightening::LB ), Tightening( 13, 29, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_round_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithRound( nlr ); - - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - - // Initialize the bounds - tableau.setLowerBound( 0, 1.4 ); - tableau.setUpperBound( 0, 1.6 ); - tableau.setLowerBound( 1, -1.4 ); - tableau.setUpperBound( 1, 2.1 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), - Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), - Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), - - Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), - Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), - - Tightening( 9, -4, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), - Tightening( 11, -7, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - - Tightening( 12, -4, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), - Tightening( 13, -25, Tightening::LB ), Tightening( 13, 35, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the 
current bounds - tableau.setLowerBound( 0, -3.1 ); - tableau.setUpperBound( 0, 1.6 ); - tableau.setLowerBound( 1, 1.4 ); - tableau.setUpperBound( 1, 2.1 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds2( { - Tightening( 2, -2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), - Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), - Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), - - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), - Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -16, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - - Tightening( 9, -16, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 11, -15, Tightening::LB ), Tightening( 11, 2, Tightening::UB ), - - Tightening( 12, -16, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, -61, Tightening::LB ), Tightening( 13, 7, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_sigmoid_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithSigmoids( nlr ); - - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - 
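// Sigmoid is strictly increasing, so interval bounds simply map through the
// function: x in [l, u] implies sigmoid( x ) in [1/(1+e^-l), 1/(1+e^-u)],
// e.g. double sig = 1.0 / ( 1.0 + std::exp( -x ) ) at each endpoint.
// For example, x4 in [-5, 5] gives x5 in [0.0067, 0.9933] (rounded to four
// decimals), which is where the expected tightenings below come from.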
tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - - Tightening( 3, 0.5000, Tightening::LB ), Tightening( 3, 0.8808, Tightening::UB ), - Tightening( 5, 0.0067, Tightening::LB ), Tightening( 5, 0.9933, Tightening::UB ), - Tightening( 7, 0.2689, Tightening::LB ), Tightening( 7, 0.7311, Tightening::UB ), - - Tightening( 8, -0.2244, Tightening::LB ), Tightening( 8, 1.6052, Tightening::UB ), - Tightening( 10, 0.3948, Tightening::LB ), Tightening( 10, 2.2244, Tightening::UB ), - - Tightening( 9, 0.4441, Tightening::LB ), Tightening( 9, 0.8327, Tightening::UB ), - Tightening( 11, 0.5974, Tightening::LB ), Tightening( 11, 0.9024, Tightening::UB ), - - Tightening( 12, 0.4441, Tightening::LB ), Tightening( 12, 0.8327, Tightening::UB ), - Tightening( 13, 2.2364, Tightening::LB ), Tightening( 13, 3.5399, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0.1192, Tightening::LB ), Tightening( 3, 0.8808, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9933, Tightening::UB ), - Tightening( 7, 0.2689, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), - - Tightening( 8, -0.7616, Tightening::LB ), Tightening( 8, 1.6052, Tightening::UB ), - Tightening( 10, 0.2384, Tightening::LB ), Tightening( 10, 2.6052, Tightening::UB ), - - Tightening( 9, 0.3183, Tightening::LB ), Tightening( 9, 0.8327, Tightening::UB ), - Tightening( 11, 0.5593, 
Tightening::LB ), Tightening( 11, 0.9312, Tightening::UB ), - - Tightening( 12, 0.3183, Tightening::LB ), Tightening( 12, 0.8327, Tightening::UB ), - Tightening( 13, 1.9963, Tightening::LB ), Tightening( 13, 3.6263, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_max_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithMax( nlr ); - - - MockTableau tableau; - tableau.getBoundManager().initialize( 12 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 10, Tightening::UB ), - Tightening( 9, -8, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), - - Tightening( 11, -10, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 
11, -large ); - tableau.setUpperBound( 11, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -8, Tightening::LB ), Tightening( 3, 9, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, -2, Tightening::LB ), Tightening( 6, 9, Tightening::UB ), - Tightening( 7, -5, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - - Tightening( 8, -5, Tightening::LB ), Tightening( 8, 16, Tightening::UB ), - Tightening( 9, -14, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - - Tightening( 10, -5, Tightening::LB ), Tightening( 10, 16, Tightening::UB ), - - Tightening( 11, -16, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_softmax_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithSoftmax( nlr ); - - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - - Tightening( 3, 0.0066, Tightening::LB ), Tightening( 3, 0.9517, Tightening::UB ), - Tightening( 5, 0.0007, Tightening::LB ), Tightening( 5, 0.9909, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ), - - Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ), - Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ), - - Tightening( 9, 0.0240, Tightening::LB ), Tightening( 9, 0.8349, Tightening::UB ), - Tightening( 11, 0.1651, Tightening::LB ), Tightening( 11, 0.9759, Tightening::UB ), - - 
Tightening( 12, 0.0240, Tightening::LB ), Tightening( 12, 0.8349, Tightening::UB ), - Tightening( 13, 0.5192, Tightening::LB ), Tightening( 13, 3.7629, Tightening::UB ), - } ); - - List<Tightening> bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List<Tightening> expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), - - Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), - Tightening( 10, 0.0654, Tightening::LB ), Tightening( 10, 2.9933, Tightening::UB ), - - Tightening( 9, 0.0184, Tightening::LB ), Tightening( 9, 0.8678, Tightening::UB ), - Tightening( 11, 0.1322, Tightening::LB ), Tightening( 11, 0.9816, Tightening::UB ), - - Tightening( 12, 0.0184, Tightening::LB ), Tightening( 12, 0.8678, Tightening::UB ), - Tightening( 13, 0.4151, Tightening::LB ), Tightening( 13, 3.8125, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_bilinear_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithBilinear( nlr ); - - MockTableau tableau; - tableau.getBoundManager().initialize( 12 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -0.1 ); - tableau.setUpperBound( 0, 0.1 ); - tableau.setLowerBound( 1, -0.1 ); - tableau.setUpperBound( 1, 0.1 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large );
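// For a bilinear node z = x * y with x in [a, b] and y in [c, d], interval
// arithmetic takes the extrema over the four corner products:
//     lo = min( a*c, a*d, b*c, b*d ),  hi = max( a*c, a*d, b*c, b*d )
// (rule sketch, not necessarily the layer's implementation). For example,
// x8 in [1.36, 2.64] and x9 in [-0.64, 0.64] give x10 in [-1.6896, 1.6896],
// matching the expected tightenings below.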
tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 0.9, Tightening::LB ), Tightening( 2, 1.1, Tightening::UB ), - Tightening( 3, -0.5, Tightening::LB ), Tightening( 3, 0.5, Tightening::UB ), - Tightening( 4, -0.3, Tightening::LB ), Tightening( 4, 0.3, Tightening::UB ), - Tightening( 5, -0.3, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ), - - Tightening( 6, -0.55, Tightening::LB ), Tightening( 6, 0.55, Tightening::UB ), - Tightening( 7, -0.09, Tightening::LB ), Tightening( 7, 0.09, Tightening::UB ), - - Tightening( 8, 1.36, Tightening::LB ), Tightening( 8, 2.64, Tightening::UB ), - Tightening( 9, -0.64, Tightening::LB ), Tightening( 9, 0.64, Tightening::UB ), - - Tightening( 10, -1.6896, Tightening::LB ), Tightening( 10, 1.6896, Tightening::UB ), - - Tightening( 11, -1.6896, Tightening::LB ), Tightening( 11, 1.6896, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the current bounds - tableau.setLowerBound( 0, -0.3 ); - tableau.setUpperBound( 0, 0.1 ); - tableau.setLowerBound( 1, -0.1 ); - tableau.setUpperBound( 1, 0.2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds2( { - Tightening( 2, 0.7, Tightening::LB ), Tightening( 2, 1.1, Tightening::UB ), - Tightening( 3, -0.8, Tightening::LB ), Tightening( 3, 0.9, Tightening::UB ), - Tightening( 4, -0.5, Tightening::LB ), Tightening( 4, 0.5, Tightening::UB ), - Tightening( 5, -0.6, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ), - - Tightening( 6, -0.88, Tightening::LB ), Tightening( 6, 0.99, Tightening::UB ), - Tightening( 7, -0.3, Tightening::LB ), Tightening( 7, 0.3, Tightening::UB ), - - Tightening( 8, 0.82, Tightening::LB ), Tightening( 8, 3.29, Tightening::UB ), - Tightening( 9, -1.29, Tightening::LB ), Tightening( 9, 1.18, Tightening::UB ), - - Tightening( 10, -4.2441, Tightening::LB ), Tightening( 10, 3.8822, Tightening::UB ), - - Tightening( 11, -3.8822, Tightening::LB ), Tightening( 11, 4.2441, Tightening::UB ), - } ); 
- - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_abs_and_relu_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithAbsAndRelu( nlr ); - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - - Tightening( 10, -5, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), - - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, 
-large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), - - Tightening( 10, -10, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), - - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_round_and_sign_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithRoundAndSign( nlr ); - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - - // Initialize the bounds - tableau.setLowerBound( 0, 1.4 ); - tableau.setUpperBound( 0, 1.6 ); - tableau.setLowerBound( 1, -1.4 ); - tableau.setUpperBound( 1, 2.1 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), - Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), - Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), - - Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), 
Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), - Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), - - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), - - Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the current bounds - tableau.setLowerBound( 0, -3.1 ); - tableau.setUpperBound( 0, 1.6 ); - tableau.setLowerBound( 1, 1.4 ); - tableau.setUpperBound( 1, 2.1 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds2( { - Tightening( 2, -2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), - Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), - Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), - - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), - Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -16, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), - - Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_leaky_relu_and_sigmoid_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithLeakyReluAndSigmoid( nlr ); - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 2 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, 
-large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), - Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), - Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ), - Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), - - Tightening( 9, 0.0573, Tightening::LB ), Tightening( 9, 0.9999, Tightening::UB ), - Tightening( 11, 0.0219, Tightening::LB ), Tightening( 11, 0.9999, Tightening::UB ), - - Tightening( 12, 0.0573, Tightening::LB ), Tightening( 12, 0.9999, Tightening::UB ), - Tightening( 13, 0.1229, Tightening::LB ), Tightening( 13, 3.9996, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, 
Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, -0.2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 7.1, Tightening::UB ), - Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), - - Tightening( 9, 0.0323, Tightening::LB ), Tightening( 9, 0.9992, Tightening::UB ), - Tightening( 11, 0.0392, Tightening::LB ), Tightening( 11, 0.9993, Tightening::UB ), - - Tightening( 12, 0.0323, Tightening::LB ), Tightening( 12, 0.9992, Tightening::UB ), - Tightening( 13, 0.1498, Tightening::LB ), Tightening( 13, 3.9972, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_softmax_and_max_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithSoftmaxAndMax( nlr ); - - - MockTableau tableau; - tableau.getBoundManager().initialize( 12 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { Tightening( 2, 0, Tightening::LB ), - Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), - Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), - Tightening( 6, 1, Tightening::UB ), - - Tightening( 3, 0.0066, Tightening::LB ), - Tightening( 3, 0.9517, Tightening::UB ), - Tightening( 5, 0.0007, Tightening::LB ), - Tightening( 5, 0.9909, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), - Tightening( 7, 0.7297, Tightening::UB ), - - Tightening( 8, -0.7225, Tightening::LB ), - Tightening( 8, 1.9403, Tightening::UB ), - Tightening( 9, 0.3192, Tightening::LB ), - Tightening( 9, 2.9819, Tightening::UB ), - - Tightening( 10, 0.3192, Tightening::LB ), - Tightening( 10, 2.9819, Tightening::UB ), - - Tightening( 11, -2.9819, Tightening::LB ), - Tightening( 11, -0.3192, Tightening::UB ) } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - 
tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds2( { Tightening( 2, -2, Tightening::LB ), - Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), - Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), - Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0.0009, Tightening::LB ), - Tightening( 3, 0.9526, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), - Tightening( 5, 0.9966, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), - Tightening( 7, 0.9820, Tightening::UB ), - - Tightening( 8, -0.9811, Tightening::LB ), - Tightening( 8, 1.9468, Tightening::UB ), - Tightening( 9, 0.0654, Tightening::LB ), - Tightening( 9, 2.9933, Tightening::UB ), - - Tightening( 10, 0.0654, Tightening::LB ), - Tightening( 10, 2.9933, Tightening::UB ), - - Tightening( 11, -2.9933, Tightening::LB ), - Tightening( 11, -0.0654, Tightening::UB ) } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_relu_and_bilinear_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithReluAndBilinear( nlr ); - - MockTableau tableau; - tableau.getBoundManager().initialize( 12 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - 
Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - - Tightening( 10, -7, Tightening::LB ), Tightening( 10, 49, Tightening::UB ), - - Tightening( 11, -49, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), - } ); - - List<Tightening> bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List<Tightening> expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, -2, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - - Tightening( 10, -14, Tightening::LB ), Tightening( 10, 49, Tightening::UB ), - - Tightening( 11, -49, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_sbt_relus_all_active() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTRelu( nlr, tableau ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 : [11, 27] - x2.ub = 2x0 + 3x1 : [11, 27] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - Both ReLUs are active, bounds survive through the
activations: - - x4.lb = 2x0 + 3x1 : [11, 27] - x4.ub = 2x0 + 3x1 : [11, 27] - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = x0 + 2x1 : [6, 16] - x6.ub = x0 + 2x1 : [6, 16] - */ - - List<Tightening> expectedBounds( { - Tightening( 2, 11, Tightening::LB ), - Tightening( 2, 27, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 11, Tightening::LB ), - Tightening( 4, 27, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, 6, Tightening::LB ), - Tightening( 6, 16, Tightening::UB ), - } ); - - List<Tightening> bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_relus_active_and_inactive() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTRelu( nlr, tableau ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -30 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 30 : [-19, -3] - x2.ub = 2x0 + 3x1 - 30 : [-19, -3] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First ReLU is inactive, bounds get zeroed - Second ReLU is active, bounds survive the activation - - x4.lb = 0 - x4.ub = 0 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - x0 - x1 : [-11, -5] - x6.ub = - x0 - x1 : [-11, -5] - */ - - List<Tightening> expectedBounds( { - Tightening( 2, -19, Tightening::LB ), - Tightening( 2, -3, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 0, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -11, Tightening::LB ), - Tightening( 6, -5, Tightening::UB ), - } ); - - List<Tightening> bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_relus_active_and_not_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTRelu( nlr, tableau ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -15 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First ReLU is undecided, bounds are concretized.
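(The coefficient below is the standard triangle-relaxation slope for an undecided
ReLU over [l, u]: lambda = u / ( u - l ), with the upper line shifted by -lambda * l
so it passes through (l, 0) and (u, u). A quick cross-check in plain Python,
outside Marabou:

    def relu_upper_relaxation( l, u ):
        lam = u / ( u - l )          # slope of the relaxing line
        return lam, -lam * l         # (slope, shift): y = lam * x - lam * l

    print( relu_upper_relaxation( -4, 12 ) )   # -> (0.75, 3.0)

matching the 0.75 slope and the +3 shift used for x4.ub.)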
- Coefficient: 12/(12--4) = 12/16 = 0.75 - Second ReLU is active, bounds survive the activation - - x4 range: [0, 12] - x4.lb = 0.75( 2x0 + 3x1 ) - 0.75 * 15 = 1.5x0 + 2.25x1 - 11.25 - x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = 0.5x0 + 1.25x1 - 11.25 - x6.ub = 0.5x0 + 1.25x1 - 8.25 - - x6 range: [2 + 1.25 - 11.25 = -8, 3 + 6.25 - 8.25 = 1] = [-8, 1] - */ - - List<Tightening> expectedBounds( { - Tightening( 2, -4, Tightening::LB ), - Tightening( 2, 12, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 12, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -8, Tightening::LB ), - Tightening( 6, 1, Tightening::UB ), - } ); - - List<Tightening> bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_relus_active_and_externally_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTRelu( nlr, tableau ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. - nlr.setBias( 1, 0, -15 ); - - // However, one of the ReLU's variables has been eliminated - nlr.eliminateVariable( 2, -3 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First ReLU is inactive (set externally), bounds get zeroed - Second ReLU is active, bounds survive the activation - - x4.lb = 0 - x4.ub = 0 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - x0 - x1 : [-11, -5] - x6.ub = - x0 - x1 : [-11, -5] - */ - - List<Tightening> expectedBounds( { - // x2 does not appear, because it has been eliminated - - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 0, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -11, Tightening::LB ), - Tightening( 6, -5, Tightening::UB ), - } ); - - List<Tightening> bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_relu_residual1() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTReluResidual1( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [-1, 1] - - Layer 1: - - x1.lb = x0 : [-1, 1] - x1.ub = x0 : [-1, 1] - - ReLU is undecided, bounds are concretized.
- Coefficient: 1/( 1--1 ) = 1/2 = 0.5 - - x2.lb = 0.5x0 - x2.ub = 0.5x0 + 0.5 - x2 range: [0, 1] - - Layer 2 (with residual from x0): - - x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1] - x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5] - x3 range: [-1, 2.5] - - ReLU is undecided, bound is concretized. - Coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7. - - x4.lb = 0 - x4.ub = 5/7 ( -1.5x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5] - x4 range: [0, 2.5] - - Layer 3 (with residual from x1): - - x5.lb = 3 ( 0 ) + 3 ( x0 ) + 1 = 3x0 + 1 : [-2, 4] - x5.ub = 3 ( -15/14 x0 + 20/14 ) + 3 ( x0 ) + 1 = -3/14 x0 + 74/14 : [71/14, 77/14 = 5.5] - - x5 range: [-2, 4] - */ - - List expectedBounds( { - Tightening( 1, -1, Tightening::LB ), - Tightening( 1, 1, Tightening::UB ), - Tightening( 2, 0, Tightening::LB ), - Tightening( 2, 1, Tightening::UB ), - Tightening( 3, -1, Tightening::LB ), - Tightening( 3, 2.5, Tightening::UB ), - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 2.5, Tightening::UB ), - Tightening( 5, 2, Tightening::LB ), - Tightening( 5, 5.5, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_relu_residual2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTReluResidual2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [-1, 1] - - Layer 1: - - x1.lb = x0 : [-1, 1] - x1.ub = x0 : [-1, 1] - - ReLU is undecided, bound is concretized. - Coefficient: 1/( 1--1 ) = 1/2 = 0.5 - - x2.lb = 0.5x0 - x2.ub = 0.5x0 + 0.5 - x2 range: [0, 1] - - Layer 2 (with residual from x0): - - x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1] - x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5] - x3 range: [-1, 2.5] - - ReLU is undecided, bound is concretized. - Coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7. 
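(To confirm the endpoint arithmetic in the next lines with exact fractions,
a one-off check in plain Python:

    from fractions import Fraction

    lam = Fraction( 5, 7 )
    ub = lambda x0: lam * ( Fraction( -3, 2 ) * x0 + 1 ) + lam   # 5/7 ( -1.5x0 + 1 ) + 5/7
    print( ub( 1 ), ub( -1 ) )   # -> 5/14 35/14, i.e. [5/14, 2.5]

so x4.ub indeed ranges over [5/14, 35/14] for x0 in [-1, 1].)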
- - x4.lb = 0 - x4.ub = 5/7 ( -1.5x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5] - x4 range: [0, 2.5] - - Layer 3 (with residual from x0): - - x5.lb = 3 ( 0 ) + 1 ( x0 ) + 1 = 1x0 + 1 : [0, 2] - x5.ub = 3 ( -15/14 x0 + 20/14 ) + 1 ( x0 ) + 1 = -31/14 x0 + 74/14 : [43/14, 105/14 - = 7.5] x5 range: [0, 7.5] - - Layer 4: - x6.lb = 1x0 + 1 : [0, 2] - x6.ub = -31/14 x0 + 74/14 : [43/14, 105/14 = 7.5] - x6 range: [0, 7.5] - */ - - List expectedBounds( { - Tightening( 1, -1, Tightening::LB ), - Tightening( 1, 1, Tightening::UB ), - Tightening( 2, 0, Tightening::LB ), - Tightening( 2, 1, Tightening::UB ), - Tightening( 3, -1, Tightening::LB ), - Tightening( 3, 2.5, Tightening::UB ), - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 2.5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), - Tightening( 5, 7.5, Tightening::UB ), - Tightening( 6, 0, Tightening::LB ), - Tightening( 6, 7.5, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_relu_reindex() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTReluReindex( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [-1, 1] - x1: [-1, 1] - - Layer 1: - - x2.lb = x0 + x1 : [-2, 2] - x2.ub = x0 + x1 : [-2, 2] - - x3.lb = x0 - x1 : [-2, 2] - x3.ub = x0 - x1 : [-2, 2] - - Both ReLUs are undecided, bounds are concretized. - Coefficient: 2/( 2--2 ) = 2/4 = 0.5 - - x4.lb = 0.5 ( x0 + x1 ) = 0.5x0 + 0.5x1 - x4.ub = 0.5 ( x0 + x1 ) + 1 = 0.5x0 + 0.5x1 + 1 - x4 range: [0, 2] - - x5.lb = 0.5 ( x0 - x1 ) = 0.5x0 - 0.5x1 - x5.ub = 0.5 ( x0 - x1 ) + 1 = 0.5x0 - 0.5x1 + 1 - x5 range: [0, 2] - - Layer 2: - - x6.lb = 1 ( 0.5x0 + 0.5x1 ) + 1 ( 0.5x0 - 0.5x1 ) = x0 : [-1, 1] - x6.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) + 1 ( 0.5x0 - 0.5x1 + 1 ) = x0 + 2 : [1, 3] - x6 range: [-1, 3] - - x7.lb = 1 ( 0.5x0 + 0.5x1 ) - 1 ( 0.5x0 - 0.5x1 + 1 ) = x1 - 1 : [-2, 0] - x7.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) - 1 ( 0.5x0 - 0.5x1 ) = x1 + 1 : [0, 2] - x7 range: [-2, 2] - - Both ReLUs are undecided, bounds are concretized. 
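(The x7 lines above subtract one relaxed neuron from another: the new lower
bound pairs x4's lower expression with x5's upper expression, and vice versa
for the upper bound. A tiny cross-check with coefficient triples in plain
Python, where each tuple is (coefficient of x0, coefficient of x1, constant):

    def sub( a, b ):
        # Componentwise a - b; use (A.lb - B.ub) for the lower bound of A - B
        return tuple( p - q for p, q in zip( a, b ) )

    x4_lb, x4_ub = ( 0.5, 0.5, 0.0 ), ( 0.5, 0.5, 1.0 )
    x5_lb, x5_ub = ( 0.5, -0.5, 0.0 ), ( 0.5, -0.5, 1.0 )
    print( sub( x4_lb, x5_ub ) )   # -> (0.0, 1.0, -1.0), i.e. x1 - 1
    print( sub( x4_ub, x5_lb ) )   # -> (0.0, 1.0, 1.0),  i.e. x1 + 1
)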
- Coefficient (first ReLU, lower): 1/( 1--1 ) = 1/2 = 0.5 - Coefficient (first ReLU, upper): 1 (propagated as is) - Coefficient (second ReLU, lower): 0 (bound is zero) - Coefficient (second ReLU, upper): 2/( 2--2 ) = 2/4 = 0.5 - - x8.lb = 0.5 ( x0 ) = 0.5x0 - x8.ub = x0 + 2 - x8 range: [0, 3] - - x9.lb = 0 - x9.ub = 0.5 ( x1 + 1 ) + 1 = 0.5x1 + 1.5 - x9 range: [0, 2] - - Layer 3: - - x10.lb = 1 ( 0.5x0 ) + 1 ( 0 ) + 1 = 0.5x0 + 1 : [0.5, 1.5] - x10.ub = 1 ( x0 + 2 ) + 1 ( 0.5x1 + 1.5 ) + 1 = x0 + 0.5x1 + 4.5 : [3, 6] - x10 range: [0.5, 6] - - x11.lb = 0.5x1 - 0.5 - x11.ub = 0.5x1 + 1.5 - x11 range: [0, 2] - - */ - - List expectedBounds( - { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), - - Tightening( 10, 0.5, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 2, Tightening::UB ) - - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_abs_all_positive() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); 
- TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 : [11, 27] - x2.ub = 2x0 + 3x1 : [11, 27] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - Both absolute values are positive, bounds survive through the activations: - - x4.lb = 2x0 + 3x1 : [11, 27] - x4.ub = 2x0 + 3x1 : [11, 27] - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = x0 + 2x1 : [6, 16] - x6.ub = x0 + 2x1 : [6, 16] - */ - - List<Tightening> expectedBounds( { - Tightening( 2, 11, Tightening::LB ), - Tightening( 2, 27, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 11, Tightening::LB ), - Tightening( 4, 27, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, 6, Tightening::LB ), - Tightening( 6, 16, Tightening::UB ), - } ); - - List<Tightening> bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_abs_positive_and_negative() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -30 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 30 : [-19, -3] - x2.ub = 2x0 + 3x1 - 30 : [-19, -3] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First absolute value is negative, bounds get flipped - Second absolute value is positive, bounds survive the
activation - - x4.lb = -2x0 -3x1 + 30 : [3, 19] - x4.ub = -2x0 -3x1 + 30 : [3, 19] - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - 3x0 - 4x1 + 30 : [-8, 14] - x6.ub = - 3x0 - 4x1 + 30 : [-8, 14] - */ - - List expectedBounds( { - Tightening( 2, -19, Tightening::LB ), - Tightening( 2, -3, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 3, Tightening::LB ), - Tightening( 4, 19, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -8, Tightening::LB ), - Tightening( 6, 14, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_absolute_values_positive_and_not_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -15 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First absolute value is undecided, bounds are concretized. 
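(For an undecided absolute value over [l, u] with l < 0 < u, the concretization
collapses both symbolic bounds to constants: the output lies in [0, max( -l, u )].
Plain-Python check for the [-4, 12] case:

    def abs_concretized( l, u ):
        assert l < 0 < u
        return 0, max( -l, u )

    print( abs_concretized( -4, 12 ) )   # -> (0, 12)

which is exactly the [0, 12] range used for x4 below.)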
- Second absolute value is active, bounds survive the activation - - x4 range: [0, 12] - x4.lb = 0 - x4.ub = 12 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - x0 - x1 : [-11, -5] - x6.ub = - x0 - x1 + 12 : [ 1, 7] - - x6 range: [-11, 7] - */ - - List<Tightening> expectedBounds( { - Tightening( 2, -4, Tightening::LB ), - Tightening( 2, 12, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 12, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -11, Tightening::LB ), - Tightening( 6, 7, Tightening::UB ), - } ); - - List<Tightening> bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_absolute_values_active_and_externally_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0). Should make the node unfixed.
- nlr.setBias( 1, 0, -15 ); - - // However, the weighted sum variable has been eliminated - nlr.eliminateVariable( 2, -3 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2 is eliminated, everything set to -3 - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - Second absolute value is positive, bounds survive the activation - - x4: all set to 3 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - x0 - x1 + 3 : [-8, -2] - x6.ub = - x0 - x1 + 3 : [-8, -2] - */ - - List<Tightening> expectedBounds( { - // x2 does not appear, because it has been eliminated - - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 3, Tightening::LB ), - Tightening( 4, 3, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -8, Tightening::LB ), - Tightening( 6, -2, Tightening::UB ), - } ); - - List<Tightening> bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_signs_positive_and_not_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -15 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] -
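(The sign relaxation applied next uses two lines: the lower bound passes through
(u, 1) with slope 2/u, and the upper bound passes through (l, -1) with slope -2/l.
For [l, u] = [-4, 12], a plain-Python check:

    def sign_relaxation( l, u ):
        assert l < 0 < u
        return 2 / u, -2 / l   # (lower slope, upper slope)

    print( sign_relaxation( -4, 12 ) )   # -> (0.1666..., 0.5)

i.e. the 1/6 and 1/2 coefficients quoted below.)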
- First sign is undecided, bounds are concretized. - Second sign is active, bounds become constant 1 - Coefficient (first Sign, lower): 2/12 = 1/6. - Coefficient (first Sign, upper): -2/-4 = 1/2. - - x4 range: [-1, 1] - x4.lb = 1/6 ( 2x0 + 3x1 - 15 ) - 1 = 2/6 x0 + 3/6 x1 - 21/6 - x4.ub = 1/2 ( 2x0 + 3x1 - 15 ) + 1 = x0 + 1.5x1 - 6.5 - - x5 range: [1, 1] - x5.lb = 1 - x5.ub = 1 - - Layer 2: - - x6.lb = 1 ( 2/6 x0 + 3/6 x1 - 21/6 ) - 1 ( 1 ) = 2/6 x0 + 3/6 x1 - 27/6 : [-16/6, 0] - x6.ub = 1 ( x0 + 1.5x1 - 6.5 ) - 1 ( 1 ) = x0 + 1.5x1 - 7.5 : [-2, 6] - - x6 range: [-8/3, 6] - */ - - List expectedBounds( { - Tightening( 2, -4, Tightening::LB ), - Tightening( 2, 12, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), - Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 1, Tightening::LB ), - Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, -2.6667, Tightening::LB ), - Tightening( 6, 6, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_signs_active_and_externally_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
- nlr.setBias( 1, 0, -15 ); - - // However, the weighted sum variable has been eliminated - nlr.eliminateVariable( 2, -3 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2 is eliminated, everything set to -3 - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First sign is negative, bounds become constant -1 - Second sign is positive, bounds become constant 1 - - x4: all set to -1 - - x5: all set to 1 - - Layer 2: - - x6.lb = 1 ( -1 ) - 1 ( 1 ) = -2 - x6.ub = 1 ( -1 ) - 1 ( 1 ) = -2 - */ - - List expectedBounds( { - // x2 does not appear, because it has been eliminated - - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), - Tightening( 4, -1, Tightening::UB ), - Tightening( 5, 1, Tightening::LB ), - Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, -2, Tightening::LB ), - Tightening( 6, -2, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_leaky_relu() - { - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTLeakyReLU( nlr, tableau ); // alpha = 0.2 - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [-1, 1] - x1: [-1, 1] - - Layer 1: - - x2.lb = x0 + x1 : [-2, 2] - x2.ub = x0 + x1 : [-2, 2] - - x3.lb = x0 - x1 : [-2, 2] - x3.ub = x0 - x1 : [-2, 2] - - Both LeakyReLUs are undecided, bounds are concretized. - Coefficient: ( 2 - 0.2*-2 )/( 2--2 ) = 2.4/4 = 0.6 - Bias: ( 0.2 - 1 ) * 2 * -2 / ( 2--2 ) = 0.8 - - x4.lb = x0 + x1 - x4.ub = 0.6 ( x0 + x1 ) + 0.8 = 0.6x0 + 0.6x1 + 0.8 - x4 range: [-0.4, 2] - - x5.lb = x0 - x1 - x5.ub = 0.6 ( x0 - x1 ) + 0.8 = 0.6x0 - 0.6x1 + 0.8 - x5 range: [-0.4, 2] - - Layer 2: - - x6.lb = 1 ( x0 + x1 ) + 1 ( x0 - x1 ) = 2x0 : [-2, 2] - x6.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) + 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 1.2x0 + 1.6 : [0.4, 2.8] - x6 range: [-2, 2.8] - - x7.lb = 1 ( x0 + x1 ) - 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2] - x7.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) - 1 ( x0 - x1 ) = -0.4x0 + 1.6x1 + 0.8 : [-1.2, 2.8] - x7 range: [-2.8, 2.8] - - Both LeakyReLUs are undecided, bounds are concretized. 
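(For an undecided LeakyReLU with slope alpha over [l, u], the relaxing line runs
through (l, alpha * l) and (u, u), giving slope (u - alpha * l)/(u - l) and
intercept (alpha - 1) * u * l / (u - l). Cross-check in plain Python:

    def leaky_relu_relaxation( l, u, alpha ):
        slope = ( u - alpha * l ) / ( u - l )
        intercept = ( alpha - 1 ) * u * l / ( u - l )
        return slope, intercept

    print( leaky_relu_relaxation( -2, 2, 0.2 ) )     # -> (0.6, 0.8)
    print( leaky_relu_relaxation( -2, 2.8, 0.2 ) )   # -> (0.666..., 0.933...)

matching the 0.6/0.8 pair above and the 10/15 and 14/15 values quoted below.)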
-          Coefficient (first LeakyReLU): ( 2.8 - 0.2*-2 )/( 2.8--2 ) = 3.2/4.8 = 10/15
-          Bias (first LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2 / ( 2.8--2 ) = 14/15
-
-          Coefficient (second LeakyReLU): ( 2.8 - 0.2*-2.8 )/( 2.8--2.8 ) = 3.36/5.6 = 0.6
-          Bias (second LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2.8 / ( 2.8--2.8 ) = 1.12
-
-          x8.lb = 2x0
-          x8.ub = 10/15 ( 1.2x0 + 1.6 ) + 14/15 = 0.8x0 + 2
-          x8 range: [-0.4, 2.8]
-
-          x9.lb = 0.4x0 + 1.6x1 - 0.8
-          x9.ub = 0.6 ( -0.4x0 + 1.6x1 + 0.8 ) + 1.12 = -0.24 x0 + 0.96 x1 + 1.6
-          x9 range: [-0.56, 2.8]
-
-          Layer 3:
-
-          x10.lb = 1 ( 0.4x0 + 1.6x1 - 0.8 ) + 1 ( 2x0 ) + 1 = 2.4x0 + 1.6x1 + 0.2 : [-3.8, 5.2]
-          x10.ub = 1 ( -0.24 x0 + 0.96 x1 + 1.6 ) + 1 ( 0.8x0 + 2 ) + 1 = 0.56x0 + 0.96x1 + 4.6 : [3.08, 6.12]
-          x10 range: [-3.8, 6.12]
-
-          x11.lb = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2]
-          x11.ub = 0.6 ( -0.4x0 + 1.6x1 + 0.8 ) + 1.12 = -0.24 x0 + 0.96 x1 + 1.6 : [0.4, 2.8]
-          x11 range: [-2.8, 2.8]
-        */
-
-        List<Tightening> expectedBounds(
-            { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-              Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-              Tightening( 4, -0.4, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-              Tightening( 5, -0.4, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
-
-              Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2.8, Tightening::UB ),
-              Tightening( 7, -2.8, Tightening::LB ), Tightening( 7, 2.8, Tightening::UB ),
-
-              Tightening( 8, -0.4, Tightening::LB ), Tightening( 8, 2.8, Tightening::UB ),
-              Tightening( 9, -0.56, Tightening::LB ), Tightening( 9, 2.8, Tightening::UB ),
-
-              Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 6.12, Tightening::UB ),
-              Tightening( 11, -2.8, Tightening::LB ), Tightening( 11, 2.8, Tightening::UB )
-
-            } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-    }
-
-    void test_sbt_sigmoids_and_round()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkSBTSigmoidsAndRound( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-
-        // Invoke SBT
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-
-        // Layer 1
-        TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 0 ), -2, 0.00001 ) );
-        TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 0 ), 2, 0.00001 ) );
-        TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 1 ), -2, 0.00001 ) );
-        TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 1 ), 2, 0.00001 ) );
-
-        // Layer 2
-        TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 0 ), 0.1192, 0.0001 ) );
-        TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 0 ), 0.8807, 0.0001 ) );
-        TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 1 ), 0.1192, 0.0001 ) );
-        TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 1 ), 0.8807, 0.0001 ) );
-
-        // Layer 3
-        /*
-          Double-check with Python
-          ---
-          from math import exp as e
-          def g(x):
-              return 1 / (1 + e(-x))
-
-          def g_prime(x):
-              return g(x) * (1 - g(x))
-
-          def lam(l, u):
-              return (g(u) - g(l)) / (u - l)
-
-          def lam_prime(l, u):
-              return min(g_prime(l), g_prime(u))
-
-          l3 = l4 = -2
-          u3 = u4 = 2
-          l5 = l6 = g(-2)
-          u5 = u6 = g(2)
-          lambda7 = lam(l3, u3)
-          lambda7_prime = lam_prime(l3, u3)
-          lambda8 = lam(l4, u4)
-          lambda8_prime = lam_prime(l4, u4)
-          x7_l = lambda7_prime * (-2) + g(-2) + g(-2) - lambda7_prime * (-2 + -2)
-          x7_u = lambda7_prime * (2) + g(2) + g(2) - lambda7_prime * (2 + 2)
-          x8_l = lambda8_prime * (-2) + g(-2) - g(2) - lambda8_prime * (-2 - 2)
-          x8_u = lambda8_prime * (2) + g(2) - g(-2) - lambda8_prime * (2 - -2)
-          print(x7_l)
-          print(x7_u)
-          print(x8_l)
-          print(x8_u)
-          ---
-          [output]:
-          0.4483930148512481
-          1.5516069851487517
-          -0.5516069851487517
-          0.5516069851487517
-        */
-        TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 0 ), 0.4483, 0.0001 ) );
-        TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 0 ), 1.5516, 0.0001 ) );
-        TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 1 ), -0.5516, 0.0001 ) );
-        TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 1 ), 0.5516, 0.0001 ) );
-
-        // Layer 4
-        TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 0 ), 0 );
-        TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 0 ), 2 );
-        TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 1 ), -1 );
-        TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 1 ), 1 );
-    }
-
-    void test_sbt_max_not_fixed()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkSBTMax( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        // Invoke SBT
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
-
-        /*
-          Input ranges:
-
-          x0: [-1, 1]
-          x1: [-1, 2]
-
-          Layer 1:
-
-          x2.lb = x0 + x1 : [-2, 3]
-          x2.ub = x0 + x1 : [-2, 3]
-
-          x3.lb = x0 - x1 : [-3, 2]
-          x3.ub = x0 - x1 : [-3, 2]
-
-          Both ReLUs are undecided, bounds are concretized.
-          Coefficient (first ReLU): 3/( 3--2 ) = 3/5 = 0.6
-          Coefficient (second ReLU): 2/( 2--3 ) = 2/5 = 0.4
-
-          x4.lb = 0.6 ( x0 + x1 ) = 0.6x0 + 0.6x1
-          x4.ub = 0.6 ( x0 + x1 ) + 1.2 = 0.6x0 + 0.6x1 + 1.2
-          x4 range: [0, 3]
-
-          x5.lb = 0.4 ( x0 - x1 ) = 0.4x0 - 0.4x1
-          x5.ub = 0.4 ( x0 - x1 ) + 1.2 = 0.4x0 - 0.4x1 + 1.2
-          x5 range: [0, 2]
-
-          Max is not fixed because x5.lb <= x4.ub and x4.lb <= x5.ub
-          Max inherits lower bound from x4, and its upper bound is constant 3.
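-          A standalone double-check of the concretization of x6's symbolic
-          lower bound over the input box (script only):
-          ---
-          corners = [(x0, x1) for x0 in (-1, 1) for x1 in (-1, 2)]
-          f = lambda x0, x1: 0.6 * x0 + 0.6 * x1
-          print(min(f(*c) for c in corners), max(f(*c) for c in corners))  # -1.2 1.8
-          ---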
-
-          x6.lb = 0.6x0 + 0.6x1 : [-1.2, 1.8]
-          x6.ub = 3 : [3, 3]
-          x6 range: [-1.2, 3]
-          (The concrete lower bound reported for x6 is 0, since x4 >= 0 and
-          x5 >= 0 force max( x4, x5 ) >= 0.)
-
-          Layer 3:
-
-          x7.lb = 2 ( 0.6x0 + 0.6x1 ) = 1.2x0 + 1.2x1 : [-2.4, 3.6]
-          x7.ub = 2 ( 3 ) = 6 : [6, 6]
-          x7 range: [-2.4, 6]
-        */
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -2, Tightening::LB ),
-            Tightening( 2, 3, Tightening::UB ),
-            Tightening( 3, -3, Tightening::LB ),
-            Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, 0, Tightening::LB ),
-            Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ),
-            Tightening( 5, 2, Tightening::UB ),
-            Tightening( 6, 0, Tightening::LB ),
-            Tightening( 6, 3, Tightening::UB ),
-            Tightening( 7, -2.4, Tightening::LB ),
-            Tightening( 7, 6, Tightening::UB ),
-
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-    }
-
-    void test_sbt_max_fixed()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkSBTMax( nlr, tableau );
-
-        tableau.setLowerBound( 0, 1 );
-        tableau.setUpperBound( 0, 2 );
-        tableau.setLowerBound( 1, -3 );
-        tableau.setUpperBound( 1, -2 );
-
-        // Invoke SBT
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
-
-        /*
-          Input ranges:
-
-          x0: [1, 2]
-          x1: [-3, -2]
-
-          Layer 1:
-
-          x2.lb = x0 + x1 : [-2, 0]
-          x2.ub = x0 + x1 : [-2, 0]
-
-          x3.lb = x0 - x1 : [3, 5]
-          x3.ub = x0 - x1 : [3, 5]
-
-          First ReLU is negative, bounds become constant 0
-          Second ReLU is positive, bounds survive the activation
-
-          x4: all set to 0
-
-          x5.lb = x0 - x1 : [3, 5]
-          x5.ub = x0 - x1 : [3, 5]
-
-          Max is fixed because x5.lb > x4.ub, it inherits x5's bounds
-
-          x6.lb = x0 - x1 : [3, 5]
-          x6.ub = x0 - x1 : [3, 5]
-
-          Layer 3:
-
-          x7.lb = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10]
-          x7.ub = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10]
-        */
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -2, Tightening::LB ),
-            Tightening( 2, 0, Tightening::UB ),
-            Tightening( 3, 3, Tightening::LB ),
-            Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, 0, Tightening::LB ),
-            Tightening( 4, 0, Tightening::UB ),
-            Tightening( 5, 3, Tightening::LB ),
-            Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, 3, Tightening::LB ),
-            Tightening( 6, 5, Tightening::UB ),
-            Tightening( 7, 6, Tightening::LB ),
-            Tightening( 7, 10, Tightening::UB ),
-
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-    }
-
-    void test_sbt_softmax1()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkSBTSoftmax( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-        // Invoke SBT
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
-    }
-
-    void test_sbt_softmax2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-
-        {
-            Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" );
-            NLR::NetworkLevelReasoner nlr;
-            MockTableau tableau;
-            nlr.setTableau( &tableau );
-            populateNetworkSBTSoftmax( nlr, tableau );
-
-            tableau.setLowerBound( 0, 1 );
-            tableau.setUpperBound( 0, 1.000001 );
-            tableau.setLowerBound( 1, 1 );
-            tableau.setUpperBound( 1, 1.000001 );
-            tableau.setLowerBound( 2, 1 );
-            tableau.setUpperBound( 2, 1.000001 );
-
-            // Invoke SBT
-            TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-            TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
-
-            /*
-              Input ranges:
-
-              x0: [1, 1.000001]
-              x1: [1, 1.000001]
-              x2: [1, 1.000001]
-            */
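-            /*
-              The expected constant outputs below are softmax( [2, 3, 0] )
-              applied to the (effectively point-valued) weighted sums; a
-              standalone double-check (script only):
-              ---
-              from math import exp
-              z = [2, 3, 0]
-              s = sum(exp(v) for v in z)
-              print([exp(v) / s for v in z])  # ~[0.2595, 0.7054, 0.0351]
-              ---
-            */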
-            List<Tightening> expectedBounds( { Tightening( 3, 2, Tightening::LB ),
-                                               Tightening( 3, 2, Tightening::UB ),
-                                               Tightening( 4, 3, Tightening::LB ),
-                                               Tightening( 4, 3, Tightening::UB ),
-                                               Tightening( 5, 0, Tightening::LB ),
-                                               Tightening( 5, 0, Tightening::UB ),
-                                               Tightening( 6, 0.2595, Tightening::LB ),
-                                               Tightening( 6, 0.2595, Tightening::UB ),
-                                               Tightening( 7, 0.7054, Tightening::LB ),
-                                               Tightening( 7, 0.7054, Tightening::UB ),
-                                               Tightening( 8, 0.0351, Tightening::LB ),
-                                               Tightening( 8, 0.0351, Tightening::UB ),
-                                               Tightening( 9, 1, Tightening::LB ),
-                                               Tightening( 9, 1, Tightening::UB ),
-                                               Tightening( 10, -1, Tightening::LB ),
-                                               Tightening( 10, -1, Tightening::UB )
-
-            } );
-
-            List<Tightening> bounds;
-            TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-            TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-        }
-        {
-            Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "er" );
-            NLR::NetworkLevelReasoner nlr;
-            MockTableau tableau;
-            nlr.setTableau( &tableau );
-            populateNetworkSBTSoftmax( nlr, tableau );
-
-            tableau.setLowerBound( 0, 1 );
-            tableau.setUpperBound( 0, 1.000001 );
-            tableau.setLowerBound( 1, 1 );
-            tableau.setUpperBound( 1, 1.000001 );
-            tableau.setLowerBound( 2, 1 );
-            tableau.setUpperBound( 2, 1.000001 );
-
-            // Invoke SBT
-            TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-            TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
-
-            /*
-              Input ranges:
-
-              x0: [1, 1.000001]
-              x1: [1, 1.000001]
-              x2: [1, 1.000001]
-            */
-            List<Tightening> expectedBounds( { Tightening( 3, 2, Tightening::LB ),
-                                               Tightening( 3, 2, Tightening::UB ),
-                                               Tightening( 4, 3, Tightening::LB ),
-                                               Tightening( 4, 3, Tightening::UB ),
-                                               Tightening( 5, 0, Tightening::LB ),
-                                               Tightening( 5, 0, Tightening::UB ),
-                                               Tightening( 6, 0.2595, Tightening::LB ),
-                                               Tightening( 6, 0.2595, Tightening::UB ),
-                                               Tightening( 7, 0.7054, Tightening::LB ),
-                                               Tightening( 7, 0.7054, Tightening::UB ),
-                                               Tightening( 8, 0.0351, Tightening::LB ),
-                                               Tightening( 8, 0.0351, Tightening::UB ),
-                                               Tightening( 9, 1, Tightening::LB ),
-                                               Tightening( 9, 1, Tightening::UB ),
-                                               Tightening( 10, -1, Tightening::LB ),
-                                               Tightening( 10, -1, Tightening::UB )
-
-            } );
-
-            List<Tightening> bounds;
-            TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-            TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-        }
-    }
-
-    void test_sbt_softmax3()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkSBTSoftmax2( nlr, tableau );
-
-        tableau.setLowerBound( 0, 1 );
-        tableau.setUpperBound( 0, 1.00001 );
-        tableau.setLowerBound( 1, 1 );
-        tableau.setUpperBound( 1, 1.00001 );
-        tableau.setLowerBound( 2, 1 );
-        tableau.setUpperBound( 2, 1.00001 );
-
-        // Invoke SBT
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
-
-        /*
-          Input ranges:
-
-          x0: [1, 1.00001]
-          x1: [1, 1.00001]
-          x2: [1, 1.00001]
-        */
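-        /*
-          The constant outputs below match two softmax groups:
-          softmax( [2, 0, -2] ) = ~( 0.8668, 0.1173, 0.0159 ) for x8, x10, x12
-          and softmax( [3, -1] ) = ~( 0.9820, 0.0179 ) for x9, x11 (the
-          grouping is read off the expected values). A standalone
-          double-check (script only):
-          ---
-          from math import exp
-          def softmax(z):
-              s = sum(exp(v) for v in z)
-              return [exp(v) / s for v in z]
-          print(softmax([2, 0, -2]), softmax([3, -1]))
-          ---
-        */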
-        List<Tightening> expectedBounds(
-            { Tightening( 3, 2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-              Tightening( 4, 3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-              Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0, Tightening::UB ),
-              Tightening( 6, -1, Tightening::LB ), Tightening( 6, -1, Tightening::UB ),
-              Tightening( 7, -2, Tightening::LB ), Tightening( 7, -2, Tightening::UB ),
-              Tightening( 8, 0.8668, Tightening::LB ), Tightening( 8, 0.8668, Tightening::UB ),
-              Tightening( 9, 0.9820, Tightening::LB ), Tightening( 9, 0.9820, Tightening::UB ),
-              Tightening( 10, 0.1173, Tightening::LB ), Tightening( 10, 0.1173, Tightening::UB ),
-              Tightening( 11, 0.0179, Tightening::LB ), Tightening( 11, 0.0179, Tightening::UB ),
-              Tightening( 12, 0.0159, Tightening::LB ), Tightening( 12, 0.0159, Tightening::UB ),
-              Tightening( 13, 0.9470, Tightening::LB ), Tightening( 13, 0.9470, Tightening::UB ),
-              Tightening( 14, -0.9470, Tightening::LB ), Tightening( 14, -0.9470, Tightening::UB ),
-              Tightening( 15, 1.0253, Tightening::LB ), Tightening( 15, 1.0253, Tightening::UB ),
-              Tightening( 16, -1.0253, Tightening::LB ), Tightening( 16, -1.0253, Tightening::UB )
-
-            } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-    }
-
-    void test_softmax_bounds_er()
-    {
-        Vector<double> inputLb = { -1, 0, 1 };
-        Vector<double> inputUb = { 0, 2, 4 };
-        Vector<double> input = { -0.5, 1, 2.5 };
-
-        double value = NLR::DeepPolySoftmaxElement::ERLowerBound( input, inputLb, inputUb, 0 );
-        TS_ASSERT( FloatUtils::areEqual( value, 0.0114799, 0.00001 ) );
-        value = NLR::DeepPolySoftmaxElement::dERLowerBound( input, inputLb, inputUb, 0, 0 );
-        TS_ASSERT( FloatUtils::areEqual( value, 0.00563867, 0.00001 ) );
-        value = NLR::DeepPolySoftmaxElement::dERLowerBound( input, inputLb, inputUb, 0, 1 );
-        TS_ASSERT( FloatUtils::areEqual( value, -0.000838421, 0.00001 ) );
-
-        Vector<double> outputLb = { 0.2, 0, 0 };
-        Vector<double> outputUb = { 0.4, 0.1, 0.1 };
-
-        value = NLR::DeepPolySoftmaxElement::ERUpperBound( input, outputLb, outputUb, 0 );
-        TS_ASSERT( FloatUtils::areEqual( value, -1.44538, 0.00001 ) );
-        value = NLR::DeepPolySoftmaxElement::dERUpperBound( input, outputLb, outputUb, 0, 0 );
-        TS_ASSERT( FloatUtils::areEqual( value, 1.96538, 0.00001 ) );
-        value = NLR::DeepPolySoftmaxElement::dERUpperBound( input, outputLb, outputUb, 0, 1 );
-        TS_ASSERT( FloatUtils::areEqual( value, -0.358535, 0.00001 ) );
-    }
-
-    void test_softmax_bounds_lse1()
-    {
-        Vector<double> inputLb = { -1, 0, 1 };
-        Vector<double> inputUb = { 0, 2, 3 };
-        Vector<double> input = { -0.5, 1, 2 };
-        double value = NLR::DeepPolySoftmaxElement::LSELowerBound( input, inputLb, inputUb, 0 );
-        TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) );
-        value = NLR::DeepPolySoftmaxElement::dLSELowerBound( input, inputLb, inputUb, 0, 0 );
-        TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) );
-        value = NLR::DeepPolySoftmaxElement::dLSELowerBound( input, inputLb, inputUb, 0, 1 );
-        TS_ASSERT( FloatUtils::areEqual( value, -0.00703444, 0.001 ) );
-
-        Vector<double> outputLb = { 0.2, 0, 0 };
-        Vector<double> outputUb = { 0.4, 0.1, 0.1 };
-        value = NLR::DeepPolySoftmaxElement::LSEUpperBound( input, outputLb, outputUb, 0 );
-        TS_ASSERT( FloatUtils::areEqual( value, -0.164165, 0.00001 ) );
-        value = NLR::DeepPolySoftmaxElement::dLSEUpperbound( input, outputLb, outputUb, 0, 0 );
-        TS_ASSERT( FloatUtils::areEqual( value, 0.272204, 0.00001 ) );
-        value = NLR::DeepPolySoftmaxElement::dLSEUpperbound( input, outputLb, outputUb, 0, 1 );
-        TS_ASSERT( FloatUtils::areEqual( value, -0.073207, 0.00001 ) );
-    }
-
-    void test_sbt_bilinear()
-    {
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
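-        /*
-          The test below exercises SBT on a bilinear (product) layer; the
-          walkthrough further down derives McCormick-style relaxation planes.
-          For reference, the plain interval product over x2 in [-1, 6] and
-          x3 in [-1, 3] is [-6, 18], while the symbolic relaxation
-          concretizes to the looser [-7, 21]. A standalone double-check of
-          the interval product (script only):
-          ---
-          products = [a * b for a in (-1, 6) for b in (-1, 3)]
-          print(min(products), max(products))  # -6 18
-          ---
-        */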
-        populateNetworkSBTBilinear( nlr, tableau );
-
-        tableau.setLowerBound( 0, 1 );
-        tableau.setUpperBound( 0, 2 );
-        tableau.setLowerBound( 1, -2 );
-        tableau.setUpperBound( 1, 1 );
-
-        // Invoke SBT
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() );
-
-        /*
-          Input ranges:
-
-          x0: [1, 2]
-          x1: [-2, 1]
-
-          Layer 1:
-
-          x2.lb = x0 - 2x1 : [-1, 6]
-          x2.ub = x0 - 2x1 : [-1, 6]
-
-          x3.lb = x0 + x1 : [-1, 3]
-          x3.ub = x0 + x1 : [-1, 3]
-
-          Coefficients for bilinear layer:
-          Lower bound:
-          alpha_l = x3.lb = -1
-          beta = x2.lb = -1
-          gamma_l = -x2.lb * x3.lb = -( -1 * -1 ) = -1
-
-          Upper bound:
-          alpha_u = x3.ub = 3
-          beta = x2.lb = -1
-          gamma_u = -x2.lb * x3.ub = -( -1 * 3 ) = 3
-
-          x4.lb = -1 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + -1 = -2x0 + x1 - 1 : [-7, -2]
-          x4.ub = 3 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + 3 = 2x0 - 7x1 + 3 : [-2, 21]
-          x4 range: [-7, 21]
-
-          Layer 3:
-
-          x5.lb = -1 ( 2x0 - 7x1 + 3 ) = -2x0 + 7x1 - 3 : [-21, 2]
-          x5.ub = -1 ( -2x0 + x1 - 1 ) = 2x0 - x1 + 1 : [2, 7]
-          x5 range: [-21, 7]
-        */
-
-        List<Tightening> expectedBounds( { Tightening( 2, -1, Tightening::LB ),
-                                           Tightening( 2, 6, Tightening::UB ),
-                                           Tightening( 3, -1, Tightening::LB ),
-                                           Tightening( 3, 3, Tightening::UB ),
-                                           Tightening( 4, -7, Tightening::LB ),
-                                           Tightening( 4, 21, Tightening::UB ),
-                                           Tightening( 5, -21, Tightening::LB ),
-                                           Tightening( 5, 7, Tightening::UB ) } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-    }
-
-    void test_concretize_input_assignment()
-    {
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-
-        populateNetwork( nlr );
-
-        // With ReLUs, inputs are zeros, only biases count
-        tableau.nextValues[0] = 0;
-        tableau.nextValues[1] = 0;
-
-        Map<unsigned, double> assignment;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) );
-
-        TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) );
-        TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 4 ) );
-
-        TS_ASSERT( assignment.size() == 14 );
-        TS_ASSERT( FloatUtils::areEqual( assignment[12], 1 ) );
-        TS_ASSERT( FloatUtils::areEqual( assignment[13], 4 ) );
-
-        // With ReLUs, case 1
-        tableau.nextValues[0] = 1;
-        tableau.nextValues[1] = 1;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) );
-
-        TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) );
-        TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 1 ) );
-
-        TS_ASSERT( FloatUtils::areEqual( assignment[12], 1 ) );
-        TS_ASSERT( FloatUtils::areEqual( assignment[13], 1 ) );
-
-        // With ReLUs, case 2
-        tableau.nextValues[0] = 1;
-        tableau.nextValues[1] = 2;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) );
-
-        TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 0 ) );
-        TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 0 ) );
-
-        TS_ASSERT( FloatUtils::areEqual( assignment[12], 0 ) );
-        TS_ASSERT( FloatUtils::areEqual( assignment[13], 0 ) );
-    }
-
-    void test_obtain_bound_from_ipq()
-    {
-        NLR::NetworkLevelReasoner nlr;
-        populateNetwork( nlr );
-
-        Query query;
-        query.setNumberOfVariables( 14 );
-
-        // Initialize the bounds
-        query.setLowerBound( 0, -1 );
-        query.setUpperBound( 0, 1 );
-        query.setLowerBound( 1, -1 );
-        query.setUpperBound( 1, 1 );
-
-        double large = 1000;
-        query.setLowerBound( 2, -large );
-        query.setUpperBound( 2, large );
-        query.setLowerBound( 3, -large );
-        query.setUpperBound( 3, large );
-        query.setLowerBound( 4, -large );
-        query.setUpperBound( 4, large );
-        query.setLowerBound( 5, -large );
-        query.setUpperBound( 5, large );
-        query.setLowerBound( 6, -large );
-        query.setUpperBound( 6, large );
-        query.setLowerBound( 7, -large );
-        query.setUpperBound( 7, large );
-        query.setLowerBound( 8, -large );
-        query.setUpperBound( 8, large );
-        query.setLowerBound( 9, -large );
-        query.setUpperBound( 9, large );
-        query.setLowerBound( 10, -large );
-        query.setUpperBound( 10, large );
-        query.setLowerBound( 11, -large );
-        query.setUpperBound( 11, large );
-        query.setLowerBound( 12, -large );
-        query.setUpperBound( 12, large );
-        query.setLowerBound( 13, -large );
-        query.setUpperBound( 13, large );
-
-        // Initialize
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds( query ) );
-
-        // Perform the tightening pass
-        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ),
-
-            Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-            Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ),
-
-            Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ),
-            Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-
-        TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() );
-        for ( const auto &bound : expectedBounds )
-            TS_ASSERT( bounds.exists( bound ) );
-    }
-
-    void test_backwards_relu()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-converge" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardReLU( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
-
-            Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-
-            Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ),
-            Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ),
-        } );
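-        /*
-          For reference, the bounds above use the standard DeepPoly ReLU
-          relaxation (sketch, not a quote of the implementation): for
-          pre-activation bounds [l, u] with l < 0 < u, the upper plane is
-          y <= u * ( x - l ) / ( u - l ) and the lower plane is
-          y >= lambda * x with lambda in { 0, 1 }. A standalone double-check
-          of the upper plane for x2 in [-1, 1] (script only):
-          ---
-          def relu_upper(l, u):
-              return u / (u - l), -u * l / (u - l)  # (slope, intercept)
-          print(relu_upper(-1, 1))  # (0.5, 0.5)
-          ---
-        */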
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 8, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-
-            Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ),
-            Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ),
-
-            Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ),
-
-            Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ),
-            Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 8, 0, Tightening::LB ),
-            Tightening( 9, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_backwards_relu2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-converge" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardReLU2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-
-            Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ),
-            Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ),
-            Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ),
-
-            Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ),
-            Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ),
-            Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ),
-
-            Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ),
-            Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ),
-
-            Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ),
-            Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ),
-
-            Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 14, 0, Tightening::LB ),
-            Tightening( 15, 0, Tightening::LB ),
-            Tightening( 16, 0, Tightening::LB ),
-
-            Tightening( 19, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-
-            Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ),
-            Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ),
-            Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ),
-
-            Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ),
-            Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ),
-            Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ),
-
-            Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ),
-            Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ),
-
-            Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ),
-            Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ),
-
-            Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 7, 0, Tightening::LB ),
-
-            Tightening( 14, 0, Tightening::LB ),
-            Tightening( 15, 0, Tightening::LB ),
-            Tightening( 16, 0, Tightening::LB ),
-
-            Tightening( 20, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_backwards_sigmoid()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-converge" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSigmoid( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
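-        /*
-          The layer-2 bounds expected below are the sigmoid of the layer-1
-          bounds: g(-1) ~ 0.2689, g(1) ~ 0.7311, g(0) = 0.5, g(2) ~ 0.8808.
-          A standalone double-check (script only):
-          ---
-          from math import exp
-          g = lambda x: 1 / (1 + exp(-x))
-          print(g(-1), g(1), g(0), g(2))
-          ---
-        */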
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, 0.2689, Tightening::LB ), Tightening( 4, 0.7311, Tightening::UB ),
-            Tightening( 5, 0.5, Tightening::LB ), Tightening( 5, 0.8808, Tightening::UB ),
-
-            Tightening( 6, -0.1261, Tightening::LB ), Tightening( 6, 0.5069, Tightening::UB ),
-            Tightening( 7, -0.2379, Tightening::LB ), Tightening( 7, 0.8571, Tightening::UB ),
-
-            Tightening( 8, 0.4685, Tightening::LB ), Tightening( 8, 0.6241, Tightening::UB ),
-            Tightening( 9, 0.4408, Tightening::LB ), Tightening( 9, 0.7021, Tightening::UB ),
-
-            Tightening( 10, -1.1819, Tightening::LB ), Tightening( 10, -1.0535, Tightening::UB ),
-            Tightening( 11, 3.4986, Tightening::LB ), Tightening( 11, 3.8797, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, 0.0066, Tightening::LB ), Tightening( 4, 0.8807, Tightening::UB ),
-            Tightening( 5, 0.0179, Tightening::LB ), Tightening( 5, 0.9526, Tightening::UB ),
-
-            Tightening( 6, -0.8362, Tightening::LB ), Tightening( 6, 0.9193, Tightening::UB ),
-            Tightening( 7, -0.8860, Tightening::LB ), Tightening( 7, 1.6904, Tightening::UB ),
-
-            Tightening( 8, 0.3023, Tightening::LB ), Tightening( 8, 0.7148, Tightening::UB ),
-            Tightening( 9, 0.2919, Tightening::LB ), Tightening( 9, 0.8443, Tightening::UB ),
-
-            Tightening( 10, -1.2694, Tightening::LB ), Tightening( 10, -0.8841, Tightening::UB ),
-            Tightening( 11, 3.2396, Tightening::LB ), Tightening( 11, 4.05, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_backwards_sigmoid2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-converge" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSigmoid2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ),
-            Tightening( 8, 0.0474, Tightening::LB ), Tightening( 8, 0.9526, Tightening::UB ),
-            Tightening( 9, 0.0474, Tightening::LB ), Tightening( 9, 0.9526, Tightening::UB ),
-            Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9975, Tightening::UB ),
-
-            Tightening( 11, 1.3787, Tightening::LB ), Tightening( 11, 4.6213, Tightening::UB ),
-            Tightening( 12, -0.5636, Tightening::LB ), Tightening( 12, 2.5636, Tightening::UB ),
-            Tightening( 13, -2.3771, Tightening::LB ), Tightening( 13, 0.3771, Tightening::UB ),
-
-            Tightening( 14, 0.7988, Tightening::LB ), Tightening( 14, 0.9903, Tightening::UB ),
-            Tightening( 15, 0.3627, Tightening::LB ), Tightening( 15, 0.9285, Tightening::UB ),
-            Tightening( 16, 0.0849, Tightening::LB ), Tightening( 16, 0.5932, Tightening::UB ),
-
-            Tightening( 17, -1.2113, Tightening::LB ), Tightening( 17, 0.0354, Tightening::UB ),
-            Tightening( 18, 1.4027, Tightening::LB ), Tightening( 18, 2.4177, Tightening::UB ),
-
-            Tightening( 19, 0.2295, Tightening::LB ), Tightening( 19, 0.5088, Tightening::UB ),
-            Tightening( 20, 0.8026, Tightening::LB ), Tightening( 20, 0.9182, Tightening::UB ),
-
-            Tightening( 21, -1.6539, Tightening::LB ), Tightening( 21, -1.3393, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.9933, Tightening::UB ),
-            Tightening( 8, 0.0067, Tightening::LB ), Tightening( 8, 0.9933, Tightening::UB ),
-            Tightening( 9, 0.0025, Tightening::LB ), Tightening( 9, 0.9933, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9991, Tightening::UB ),
-
-            Tightening( 11, 1.2517, Tightening::LB ), Tightening( 11, 4.9701, Tightening::UB ),
-            Tightening( 12, -0.9599, Tightening::LB ), Tightening( 12, 2.8466, Tightening::UB ),
-            Tightening( 13, -2.8147, Tightening::LB ), Tightening( 13, 0.9188, Tightening::UB ),
-
-            Tightening( 14, 0.7776, Tightening::LB ), Tightening( 14, 0.9931, Tightening::UB ),
-            Tightening( 15, 0.2769, Tightening::LB ), Tightening( 15, 0.9451, Tightening::UB ),
-            Tightening( 16, 0.0565, Tightening::LB ), Tightening( 16, 0.7148, Tightening::UB ),
-
-            Tightening( 17, -1.4307, Tightening::LB ), Tightening( 17, 0.1083, Tightening::UB ),
-            Tightening( 18, 1.2592, Tightening::LB ), Tightening( 18, 2.5519, Tightening::UB ),
-
-            Tightening( 19, 0.1929, Tightening::LB ), Tightening( 19, 0.5270, Tightening::UB ),
-            Tightening( 20, 0.7789, Tightening::LB ), Tightening( 20, 0.9277, Tightening::UB ),
-
-            Tightening( 21, -1.6967, Tightening::LB ), Tightening( 21, -1.3115, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_backwards_abs()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-converge" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardAbs( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
-
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ),
-
-            Tightening( 10, -4, Tightening::LB ), Tightening( 10, 0, Tightening::UB ),
-            Tightening( 11, 2, Tightening::LB ), Tightening( 11, 8, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, 0, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 4, Tightening::UB ),
-
-            Tightening( 6, -5, Tightening::LB ), Tightening( 6, 4, Tightening::UB ),
-            Tightening( 7, -4, Tightening::LB ), Tightening( 7, 10, Tightening::UB ),
-
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 10, Tightening::UB ),
-
-            Tightening( 10, -15, Tightening::LB ), Tightening( 10, 0, Tightening::UB ),
-            Tightening( 11, 2, Tightening::LB ), Tightening( 11, 27, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_backwards_abs2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-converge" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardAbs2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-
-            Tightening( 11, -4, Tightening::LB ), Tightening( 11, 9, Tightening::UB ),
-            Tightening( 12, -2, Tightening::LB ), Tightening( 12, 12, Tightening::UB ),
-            Tightening( 13, -5, Tightening::LB ), Tightening( 13, 6, Tightening::UB ),
-
-            Tightening( 14, 0, Tightening::LB ), Tightening( 14, 9, Tightening::UB ),
-            Tightening( 15, 0, Tightening::LB ), Tightening( 15, 12, Tightening::UB ),
-            Tightening( 16, 0, Tightening::LB ), Tightening( 16, 6, Tightening::UB ),
-
-            Tightening( 17, -15, Tightening::LB ), Tightening( 17, 12, Tightening::UB ),
-            Tightening( 18, 0, Tightening::LB ), Tightening( 18, 27, Tightening::UB ),
-
-            Tightening( 19, 0, Tightening::LB ), Tightening( 19, 15, Tightening::UB ),
-            Tightening( 20, 0, Tightening::LB ), Tightening( 20, 27, Tightening::UB ),
-
-            Tightening( 21, -28, Tightening::LB ), Tightening( 21, 14, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 6, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 15, Tightening::UB ),
-
-            Tightening( 11, -13, Tightening::LB ), Tightening( 11, 18, Tightening::UB ),
-            Tightening( 12, -5, Tightening::LB ), Tightening( 12, 25, Tightening::UB ),
-            Tightening( 13, -7, Tightening::LB ), Tightening( 13, 15, Tightening::UB ),
-
-            Tightening( 14, 0, Tightening::LB ), Tightening( 14, 18, Tightening::UB ),
-            Tightening( 15, 0, Tightening::LB ), Tightening( 15, 25, Tightening::UB ),
-            Tightening( 16, 0, Tightening::LB ), Tightening( 16, 15, Tightening::UB ),
-
-            Tightening( 17, -33, Tightening::LB ), Tightening( 17, 25, Tightening::UB ),
-            Tightening( 18, 0, Tightening::LB ), Tightening( 18, 58, Tightening::UB ),
-
-            Tightening( 19, 0, Tightening::LB ), Tightening( 19, 33, Tightening::UB ),
-            Tightening( 20, 0, Tightening::LB ), Tightening( 20, 58, Tightening::UB ),
-
-            Tightening( 21, -59, Tightening::LB ), Tightening( 21, 32, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_round() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardRound( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), - Tightening( 7, -4, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 6.5, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 11, -4, Tightening::LB ), - Tightening( 11, 6, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( 
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-
-            Tightening( 6, -3, Tightening::LB ), Tightening( 6, 5, Tightening::UB ),
-            Tightening( 7, -10.5, Tightening::LB ), Tightening( 7, 5.5, Tightening::UB ),
-
-            Tightening( 8, -3, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, -10, Tightening::LB ), Tightening( 9, 6, Tightening::UB ),
-
-            Tightening( 10, -3, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-            Tightening( 11, -15.5, Tightening::LB ), Tightening( 11, 11.5, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 7, -10, Tightening::LB ),
-            Tightening( 7, 5, Tightening::UB ),
-
-            Tightening( 11, -14, Tightening::LB ),
-            Tightening( 11, 11, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_backwards_round2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-converge" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardRound2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-            Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
-            Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-
-            Tightening( 11, -11, Tightening::LB ), Tightening( 11, 15, Tightening::UB ),
-            Tightening( 12, -10, Tightening::LB ), Tightening( 12, 10, Tightening::UB ),
-            Tightening( 13, -7, Tightening::LB ), Tightening( 13, 3, Tightening::UB ),
-
-            Tightening( 14, -11, Tightening::LB ), Tightening( 14, 15, Tightening::UB ),
-            Tightening( 15, -10, Tightening::LB ), Tightening( 15, 10, Tightening::UB ),
-            Tightening( 16, -7, Tightening::LB ), Tightening( 16, 3, Tightening::UB ),
-
-            Tightening( 17, -27.5, Tightening::LB ), Tightening( 17, 27.5, Tightening::UB ),
-            Tightening( 18, -16.5, Tightening::LB ), Tightening( 18, 16.5, Tightening::UB ),
-
-            Tightening( 19, -28, Tightening::LB ), Tightening( 19, 28, Tightening::UB ),
-            Tightening( 20, -17, Tightening::LB ), Tightening( 20, 17, Tightening::UB ),
-
-            Tightening( 21, -38, Tightening::LB ), Tightening( 21, 36, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
-            Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-
-            Tightening( 11, -13, Tightening::LB ), Tightening( 11, 30, Tightening::UB ),
-            Tightening( 12, -23, Tightening::LB ), Tightening( 12, 12, Tightening::UB ),
-            Tightening( 13, -9, Tightening::LB ), Tightening( 13, 6, Tightening::UB ),
-
-            Tightening( 14, -13, Tightening::LB ), Tightening( 14, 30, Tightening::UB ),
-            Tightening( 15, -23, Tightening::LB ), Tightening( 15, 12, Tightening::UB ),
-            Tightening( 16, -9, Tightening::LB ), Tightening( 16, 6, Tightening::UB ),
-
-            Tightening( 17, -57.5, Tightening::LB ), Tightening( 17, 32.5, Tightening::UB ),
-            Tightening( 18, -23.5, Tightening::LB ), Tightening( 18, 26.5, Tightening::UB ),
-
-            Tightening( 19, -58, Tightening::LB ), Tightening( 19, 33, Tightening::UB ),
-            Tightening( 20, -24, Tightening::LB ), Tightening( 20, 27, Tightening::UB ),
-
-            Tightening( 21, -74, Tightening::LB ), Tightening( 21, 44, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_backwards_sign()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-converge" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSign( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ),
-
-            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-
-            Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ),
-            Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ),
-
-            Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ),
-
-            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-
-            Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ),
-            Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_backwards_sign2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-converge" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSign2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-            Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ),
-
-            Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ),
-            Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ),
-            Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ),
-
-            Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ),
-            Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ),
-            Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ),
-
-            Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ),
-            Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ),
-
-            Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ),
-            Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ),
-
-            Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-            Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ),
-
-            Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ),
-            Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ),
-            Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ),
-
-            Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ),
-            Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ),
-            Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ),
-
-            Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ),
-            Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ),
-
-            Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ),
-            Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ),
-
-            Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_backwards_leaky_relu()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-converge" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardLeakyReLU( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
-
-            Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ),
-            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-
-            Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ),
-            Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 4, -0.1, Tightening::LB ),
-
-            Tightening( 8, -0.045, Tightening::LB ),
-            Tightening( 9, -0.3, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-
-            Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ),
-            Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ),
-
-            Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ),
-            Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ),
-
-            Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ),
-            Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 4, -0.5, Tightening::LB ),
-            Tightening( 5, -0.4, Tightening::LB ),
-
-            Tightening( 8, -0.4571, Tightening::LB ),
-            Tightening( 9, -1.1057, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_backwards_leaky_relu2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-converge" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardLeakyRelu2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-            Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
-            Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-
-            Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ),
-            Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ),
-            Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ),
-
-            Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ),
-            Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ),
-            Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ),
-
-            Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ),
-            Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ),
-
-            Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ),
-            Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ),
-
-            Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 7, -0.2, Tightening::LB ),
-            Tightening( 8, -0.3, Tightening::LB ),
-            Tightening( 9, -0.3, Tightening::LB ),
-            Tightening( 10, -0.6, Tightening::LB ),
-
-            Tightening( 14, -0.9, Tightening::LB ),
-            Tightening( 15, -0.89, Tightening::LB ),
-            Tightening( 16, -0.77, Tightening::LB ),
-
-            Tightening( 19, -2.3133, Tightening::LB ),
-            Tightening( 20, -1.2, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
-            Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-
-            Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ),
-            Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ),
-            Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ),
-
-            Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ),
-            Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ),
-            Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ),
-
-            Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ),
-            Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ),
-
-            Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ),
-            Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ),
-
-            Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 7, -0.2, Tightening::LB ),
-            Tightening( 8, -0.5, Tightening::LB ),
-            Tightening( 9, -0.6, Tightening::LB ),
-            Tightening( 10, -1.5, Tightening::LB ),
-
-            Tightening( 14, -1.1, Tightening::LB ),
-            Tightening( 15, -2.1771, Tightening::LB ),
-            Tightening( 16, -1.15, Tightening::LB ),
-
-            Tightening( 19, -5.6259, Tightening::LB ),
-            Tightening( 20, -1.9, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_backwards_softmax_and_max()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-converge" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSoftmaxAndMax( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
-
-            Tightening( 3, 0.2119, Tightening::LB ), Tightening( 3, 0.8756, Tightening::UB ),
-            Tightening( 5, 0.0049, Tightening::LB ), Tightening( 5, 0.6652, Tightening::UB ),
-            Tightening( 7, 0.0634, Tightening::LB ), Tightening( 7, 0.4955, Tightening::UB ),
-
-            Tightening( 8, -0.1519, Tightening::LB ), Tightening( 8, 1.4775, Tightening::UB ),
-            Tightening( 9, 0.6614, Tightening::LB ), Tightening( 9, 2.3899, Tightening::UB ),
-
-            Tightening( 10, 0.6614, Tightening::LB ), Tightening( 10, 2.3899, Tightening::UB ),
-
-            Tightening( 11, -2.3899, Tightening::LB ), Tightening( 11, -0.6614, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-
-            Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ),
-            Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ),
-
-            Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ),
-            Tightening( 9, 0.0674, Tightening::LB ), Tightening( 9, 2.9934, Tightening::UB ),
-
-            Tightening( 10, 0.0674, Tightening::LB ), Tightening( 10, 2.9934, Tightening::UB ),
-
-            Tightening( 11, -2.9934, Tightening::LB ), Tightening( 11, -0.0674, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_backwards_softmax_and_max2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-converge" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSoftmaxAndMax2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, 0.0003, Tightening::LB ), Tightening( 7, 0.9864, Tightening::UB ),
-            Tightening( 8, 0.0001, Tightening::LB ), Tightening( 8, 0.9907, Tightening::UB ),
-            Tightening( 9, 0.0001, Tightening::LB ), Tightening( 9, 0.9907, Tightening::UB ),
-            Tightening( 10, 0.0001, Tightening::LB ), Tightening( 10, 0.9994, Tightening::UB ),
-
-            Tightening( 11, 1.0013, Tightening::LB ), Tightening( 11, 4.9635, Tightening::UB ),
-            Tightening( 12, -0.9861, Tightening::LB ), Tightening( 12, 2.9806, Tightening::UB ),
-            Tightening( 13, -2.9902, Tightening::LB ), Tightening( 13, 0.9678, Tightening::UB ),
-
-            Tightening( 14, 0.1086, Tightening::LB ), Tightening( 14, 0.9971, Tightening::UB ),
-            Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8766, Tightening::UB ),
-            Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4595, Tightening::UB ),
-
-            Tightening( 17, 0.1086, Tightening::LB ), Tightening( 17, 0.9971, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, 0.0001, Tightening::LB ), Tightening( 7, 0.9999, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 0.9991, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 0.9990, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9999, Tightening::UB ),
-
-            Tightening( 11, 1.0003, Tightening::LB ), Tightening( 11, 4.9989, Tightening::UB ),
-            Tightening( 12, -0.9999, Tightening::LB ), Tightening( 12, 2.9979, Tightening::UB ),
-            Tightening( 13, -2.9989, Tightening::LB ), Tightening( 13, 0.9980, Tightening::UB ),
-
-            Tightening( 14, 0.1067, Tightening::LB ), Tightening( 14, 0.9972, Tightening::UB ),
-            Tightening( 15, 0.0024, Tightening::LB ), Tightening( 15, 0.8786, Tightening::UB ),
-            Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4677, Tightening::UB ),
-
-            Tightening( 17, 0.1067, Tightening::LB ), Tightening( 17, 0.9972, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_backwards_relu_and_bilinear()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-converge" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardReluAndBilinear( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
-
-            Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ),
-
-            Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ),
-
-            Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-
-            Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ),
-            Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ),
-
-            Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ),
-
-            Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 7, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_backwards_relu_and_bilinear2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-converge" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardReluAndBilinear2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-
-            Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ),
-            Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ),
-
-            Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ),
-            Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ),
-
-            Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 13, 0, Tightening::LB ),
-            Tightening( 14, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-
-            Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ),
-            Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ),
-
-            Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ),
-            Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ),
-
-            Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 7, 0, Tightening::LB ),
-
-            Tightening( 13, 0, Tightening::LB ),
-            Tightening( 14, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_preimage_approximation_leaky_relu()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-preimage-approx" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardLeakyReLU( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
-
-            Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ),
-            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-
-            Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ),
-            Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Invoke PreimageApproximation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        /*List<Tightening> expectedBounds2(
-            { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-              Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-              Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-              Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
-
-              Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-              Tightening( 7, -2.1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-
-              Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ),
-              Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-
-              Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.35, Tightening::UB ),
-              Tightening( 11, 1.35, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        for ( auto &bound : bounds )
-        {
-            bound.dump();
-        }
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );*/
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-
-            Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ),
-            Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ),
-
-            Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ),
-            Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ),
-
-            Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ),
-            Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        /*List<Tightening> expectedBounds4(
-            { Tightening( 4, -0.5, Tightening::LB ),
-              Tightening( 5, -0.4, Tightening::LB ),
-
-              Tightening( 8, -0.4571, Tightening::LB ),
-              Tightening( 9, -1.1057, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );*/
-    }
-
-    bool boundsEqual( const List<Tightening> &bounds, const List<Tightening> &expectedBounds )
-    {
-        if ( bounds.size() != expectedBounds.size() )
-            return false;
-
-        bool allFound = true;
-        for ( const auto &bound : bounds )
-        {
-            bool currentFound = false;
-            for ( const auto &expectedBound : expectedBounds )
-            {
-                currentFound |=
-                    ( bound._type == expectedBound._type &&
-                      bound._variable == expectedBound._variable &&
-                      FloatUtils::areEqual( bound._value, expectedBound._value, 0.0001 ) );
-            }
-            allFound &= currentFound;
-        }
-        return allFound;
-    }
-
-    void updateTableau( MockTableau &tableau, List<Tightening> &tightenings )
-    {
-        ASSERT( tableau );
-        for ( const auto &tightening : tightenings )
-        {
-            if ( tightening._type == Tightening::LB )
-            {
-                tableau.setLowerBound( tightening._variable, tightening._value );
-            }
-
-            if ( tightening._type == Tightening::UB )
-            {
-                tableau.setUpperBound( tightening._variable, tightening._value );
-            }
-        }
-    }
-};
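The backward-analysis tests above all drive one protocol: seed input bounds, run a forward DeepPoly pass, feed the resulting tightenings back into the tableau, and then run the backward LP pass. Below is a minimal sketch of that loop, assuming the Marabou headers included by this suite; populateNetworkBackwardRound stands in for any of the network-population helpers, and the input bounds are illustrative.

    // Option setup mirrors the tests: symbolic bound tightening plus the
    // backward-converging MILP bound-tightening mode
    Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
    Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" );

    NLR::NetworkLevelReasoner nlr;
    MockTableau tableau;
    nlr.setTableau( &tableau );
    populateNetworkBackwardRound( nlr, tableau ); // any population helper from this suite

    tableau.setLowerBound( 0, 0 ); // illustrative input bounds
    tableau.setUpperBound( 0, 1 );

    // Forward pass: pull the tableau bounds into the NLR and run DeepPoly
    nlr.obtainCurrentBounds();
    nlr.deepPolyPropagation();

    List<Tightening> bounds;
    nlr.getConstraintTightenings( bounds );

    // Backward pass: write the discovered tightenings back into the tableau,
    // refresh the NLR's view of them, and run the LP-relaxation propagation
    updateTableau( tableau, bounds );
    nlr.obtainCurrentBounds();
    nlr.lpRelaxationPropagation();
    nlr.getConstraintTightenings( bounds );

boundsEqual then compares the returned tightenings against the expected list, treating values within 0.0001 of each other as equal.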
- static const unsigned STATISTICS_PRINTING_FREQUENCY; - static const unsigned STATISTICS_PRINTING_FREQUENCY_GUROBI; - - // Tolerance when checking whether the value computed for a basic variable is out of bounds - static const double BOUND_COMPARISON_ADDITIVE_TOLERANCE; - static const double BOUND_COMPARISON_MULTIPLICATIVE_TOLERANCE; - - // Tolerance when checking whether a basic variable depends on a non-basic variable, by looking - // at the change column, as part of a pivot operation. - static const double PIVOT_CHANGE_COLUMN_TOLERANCE; - - // Tolerance for the difference when computing the pivot entry by column and by row - static const double PIVOT_ROW_AND_COLUMN_TOLERANCE; - - // Tolerance when checking whether a non-basic variable is eligible for being selected as the - // entering variable, by its reduced cost - static const double ENTRY_ELIGIBILITY_TOLERANCE; - - // Ratio test tolerance constants - static const double RATIO_CONSTRAINT_ADDITIVE_TOLERANCE; - static const double RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE; - static const double HARRIS_RATIO_CONSTRAINT_ADDITIVE_TOLERANCE; - static const double HARRIS_RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE; - - // Cost function tolerance constants - static const double BASIC_COSTS_ADDITIVE_TOLERANCE; - static const double BASIC_COSTS_MULTIPLICATIVE_TOLERANCE; - - // Sparse ForrestTomlin diagonal element tolerance constant - static const double SPARSE_FORREST_TOMLIN_DIAGONAL_ELEMENT_TOLERANCE; - - // Toggle use of Harris' two-pass ratio test for selecting the leaving variable - static const bool USE_HARRIS_RATIO_TEST; - - // Toggle query-preprocessing on/off. - static const bool PREPROCESS_INPUT_QUERY; - - // Assuming the preprocessor is on, toggle whether or not it will attempt to perform variable - // elimination. - static const bool PREPROCESSOR_ELIMINATE_VARIABLES; - - // Toggle whether or not PL/NL constraints will be called upon - // to add auxiliary variables and equations after preprocessing. - static const bool PL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING; - static const bool NL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING; - - // If the difference between a variable's lower and upper bounds is smaller than this - // threshold, the preprocessor will treat it as fixed. - static const double PREPROCESSOR_ALMOST_FIXED_THRESHOLD; - - // Maximal rounds of tightening to perform in the preprocessor to avoid non-termination. - static const unsigned PREPROCESSSING_MAX_TIGHTEING_ROUND; - - // Try to set the initial tableau assignment to an assignment that is legal with - // respect to the input network. - static const bool WARM_START; - - // The maximal number of iterations without new tree states being visited, before - // the engine performs a precision restoration. - static const unsigned MAX_ITERATIONS_WITHOUT_PROGRESS; - - // How often should the main loop check the current degradation? - static const unsigned DEGRADATION_CHECKING_FREQUENCY; - - // The threshold of degradation above which restoration is required - static const double DEGRADATION_THRESHOLD; - - // If a pivot element in a simplex iteration is smaller than this threshold, the engine will - // attempt to pick another element. - static const double ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD; - - // If true, column-merging equations are given special treatment and cause columns in the - // tableau to be merged (instead of a new row added). 
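// (Column merging is disabled by default; the deleted GlobalConfiguration.cpp
// below sets USE_COLUMN_MERGING_EQUATIONS = false.)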
- static const bool USE_COLUMN_MERGING_EQUATIONS; - - // If a pivot element in a Gaussian elimination iteration is smaller than this threshold times - // the largest element in the column, the elimination engine will attempt to pick another pivot. - static const double GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD; - - // How many potential pivots should the engine inspect (at most) in every simplex iteration? - static const unsigned MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS; - - static const DivideStrategy SPLITTING_HEURISTICS; - - // The frequency to use interval splitting when largest interval splitting strategy is in use. - static const unsigned INTERVAL_SPLITTING_FREQUENCY; - - // When automatically deciding which splitting strategy to use, we use relu-splitting if - // the number of inputs is larger than this number. - static const unsigned INTERVAL_SPLITTING_THRESHOLD; - - // How often should we perform full bound tightening, on the entire contraints matrix A. - static const unsigned BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY; - - // When the row bound tightener is asked to run until saturation, it can enter an infinite loop - // due to tiny increments in bounds. This number limits the number of iterations it can perform. - static const unsigned ROW_BOUND_TIGHTENER_SATURATION_ITERATIONS; - - // If the cost function error exceeds this threshold, it is recomputed - static const double COST_FUNCTION_ERROR_THRESHOLD; - - // Random seed for generating simulation values. - static const unsigned SIMULATION_RANDOM_SEED; - - // Random seed for EstimateVolume procedure (PreimageApproximation). - static const unsigned VOLUME_ESTIMATION_RANDOM_SEED; - - // Number of iterations for EstimateVolume procedure (PreimageApproximation). - static const unsigned VOLUME_ESTIMATION_ITERATIONS; - - // Random seed for PreimageApproximation optimization. - static const unsigned PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED; - - // Maximum iterations for PreimageApproximation optimization. - static const unsigned PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS; - - // Step size for PreimageApproximation optimization. - static const double PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; - - // How often should projected steepest edge reset the reference space? - static const unsigned PSE_ITERATIONS_BEFORE_RESET; - - // An error threshold which, when crossed, causes projected steepest edge to reset the reference - // space - static const double PSE_GAMMA_ERROR_THRESHOLD; - - // PSE's Gamma function's update tolerance - static const double PSE_GAMMA_UPDATE_TOLERANCE; - - // The tolerance for checking whether f = Constraint( b ), Constraint \in { ReLU, ABS, Sign} - static const double CONSTRAINT_COMPARISON_TOLERANCE; - - // Toggle between two types of LSE lower bound for softmax - static const double SOFTMAX_LSE2_THRESHOLD; - - // Should the initial basis be comprised only of auxiliary (row) variables? - static const bool ONLY_AUX_INITIAL_BASIS; - - /* - Explicit (Reluplex-style) bound tightening options - */ - - enum ExplicitBasisBoundTighteningType { - // Compute the inverse basis matrix and use it - COMPUTE_INVERTED_BASIS_MATRIX = 0, - // Use the inverted basis matrix without using it, via transformations - USE_IMPLICIT_INVERTED_BASIS_MATRIX = 1, - // Disable explicit basis bound tightening - DISABLE_EXPLICIT_BASIS_TIGHTENING = 2, - }; - - // When doing bound tightening using the explicit basis matrix, should the basis matrix be - // inverted? 
- static const ExplicitBasisBoundTighteningType EXPLICIT_BASIS_BOUND_TIGHTENING_TYPE; - static const double EXPLICIT_BASIS_BOUND_TIGHTENING_ROUNDING_CONSTANT; - - // When doing explicit bound tightening, should we repeat until saturation? - static const bool EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION; - - /* - Symbolic bound tightening options - */ - - // Symbolic tightening, LP rounding constants - static const double SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT; - static const double LP_TIGHTENING_ROUNDING_CONSTANT; - - static const double SIGMOID_CUTOFF_CONSTANT; - - /* - Constraint fixing heuristics - */ - - // When a PL constraint proposes a fix that affects multiple variables, should it first query - // for any relevant linear connections between the variables? - static const bool USE_SMART_FIX; - - // A heuristic for selecting which of the broken PL constraints will be repaired next. In this - // case, the one that has been repaired the least number of times so far. - static const bool USE_LEAST_FIX; - - /* - Basis factorization options - */ - - // The number of accumualted eta matrices, after which the basis will be refactorized - static const unsigned REFACTORIZATION_THRESHOLD; - - // The kind of basis factorization algorithm in use - enum BasisFactorizationType { - LU_FACTORIZATION, - SPARSE_LU_FACTORIZATION, - FORREST_TOMLIN_FACTORIZATION, - SPARSE_FORREST_TOMLIN_FACTORIZATION, - }; - static const BasisFactorizationType BASIS_FACTORIZATION_TYPE; - - /* In the polarity-based branching heuristics, only this many earliest nodes - are considered to branch on. - */ - static const unsigned POLARITY_CANDIDATES_THRESHOLD; - - /* The max number of DnC splits - */ - static const unsigned DNC_DEPTH_THRESHOLD; - - /* Minimal coefficient of a variable in a Tableau row, that is used for bound tightening - */ - static const double MINIMAL_COEFFICIENT_FOR_TIGHTENING; - - /* The tolerance of errors when checking lemmas in the proof-checking process - */ - static const double LEMMA_CERTIFICATION_TOLERANCE; - - /* Denote whether proofs should be written as a JSON file - */ - static const bool WRITE_JSON_PROOF; - - /* How many layers after the current layer do we encode in backward analysis. - */ - static const unsigned BACKWARD_BOUND_PROPAGATION_DEPTH; - - /* How many rounds of backward analysis to perform? - */ - static const unsigned MAX_ROUNDS_OF_BACKWARD_ANALYSIS; - -#ifdef ENABLE_GUROBI - /* - The number of threads Gurobi spawns - */ - static const unsigned GUROBI_NUMBER_OF_THREADS; - static const bool GUROBI_LOGGING; -#endif // ENABLE_GUROBI - - /* - Logging options - */ - static const bool DNC_MANAGER_LOGGING; - static const bool ENGINE_LOGGING; - static const bool TABLEAU_LOGGING; - static const bool SMT_CORE_LOGGING; - static const bool DANTZIGS_RULE_LOGGING; - static const bool BASIS_FACTORIZATION_LOGGING; - static const bool PREPROCESSOR_LOGGING; - static const bool INPUT_QUERY_LOGGING; - static const bool PROJECTED_STEEPEST_EDGE_LOGGING; - static const bool GAUSSIAN_ELIMINATION_LOGGING; - static const bool QUERY_LOADER_LOGGING; - static const bool SYMBOLIC_BOUND_TIGHTENER_LOGGING; - static const bool NETWORK_LEVEL_REASONER_LOGGING; - static const bool MPS_PARSER_LOGGING; - static const bool ONNX_PARSER_LOGGING; - static const bool SOI_LOGGING; - static const bool SCORE_TRACKER_LOGGING; - static const bool CEGAR_LOGGING; -}; - -#endif // __GlobalConfiguration_h__ - -// -// Local Variables: -// compile-command: "make -C .. 
" -// tags-file-name: "../TAGS" -// c-basic-offset: 4 -// End: -// From 81e28b7835a2a931dbd99ddeea1c5e141eabcb80 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 12 Dec 2024 20:57:48 +0200 Subject: [PATCH 32/81] Delete src/nlr/GlobalConfiguration.cpp --- src/nlr/GlobalConfiguration.cpp | 238 -------------------------------- 1 file changed, 238 deletions(-) delete mode 100644 src/nlr/GlobalConfiguration.cpp diff --git a/src/nlr/GlobalConfiguration.cpp b/src/nlr/GlobalConfiguration.cpp deleted file mode 100644 index 3a08b92234..0000000000 --- a/src/nlr/GlobalConfiguration.cpp +++ /dev/null @@ -1,238 +0,0 @@ -/********************* */ -/*! \file GlobalConfiguration.cpp - ** \verbatim - ** Top contributors (to current version): - ** Guy Katz, Parth Shah, Derek Huang - ** This file is part of the Marabou project. - ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS - ** in the top-level source directory) and their institutional affiliations. - ** All rights reserved. See the file COPYING in the top-level source - ** directory for licensing information.\endverbatim - ** - ** [[ Add lengthier description here ]] - - **/ - -#include "GlobalConfiguration.h" - -#include "DivideStrategy.h" -#include "MString.h" - -#include - - -// The exponential moving average is calculated as -// ema = current * alpha + previous * (1 - alpha) -const double GlobalConfiguration::EXPONENTIAL_MOVING_AVERAGE_ALPHA = 0.5; - -// Whether to use SoI instead of Reluplex for local search for satisfying assignments -// to non-linear constraint. -bool GlobalConfiguration::USE_DEEPSOI_LOCAL_SEARCH = true; - -const double GlobalConfiguration::SCORE_BUMP_FOR_PL_CONSTRAINTS_NOT_IN_SOI = 5; - -// Use the polarity metrics to decide which branch to take first in a case split -// and how to repair a ReLU constraint. 
-const bool GlobalConfiguration::USE_POLARITY_BASED_DIRECTION_HEURISTICS = true; - -const double GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS = 0.0000000001; -const unsigned GlobalConfiguration::DEFAULT_DOUBLE_TO_STRING_PRECISION = 10; -const unsigned GlobalConfiguration::STATISTICS_PRINTING_FREQUENCY = 10000; -const unsigned GlobalConfiguration::STATISTICS_PRINTING_FREQUENCY_GUROBI = 100; -const double GlobalConfiguration::BOUND_COMPARISON_ADDITIVE_TOLERANCE = 0.0000001; -const double GlobalConfiguration::BOUND_COMPARISON_MULTIPLICATIVE_TOLERANCE = 0.001 * 0.0000001; -const double GlobalConfiguration::PIVOT_CHANGE_COLUMN_TOLERANCE = 0.000000001; -const double GlobalConfiguration::PIVOT_ROW_AND_COLUMN_TOLERANCE = 0.01; -const double GlobalConfiguration::ENTRY_ELIGIBILITY_TOLERANCE = 0.00000001; -const double GlobalConfiguration::RATIO_CONSTRAINT_ADDITIVE_TOLERANCE = 0.0000001 * 0.3; -const double GlobalConfiguration::RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE = - 0.001 * 0.0000001 * 0.3; -const double GlobalConfiguration::HARRIS_RATIO_CONSTRAINT_ADDITIVE_TOLERANCE = 0.0000001 * 0.5; -const double GlobalConfiguration::HARRIS_RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE = - 0.001 * 0.0000001 * 0.5; -const double GlobalConfiguration::BASIC_COSTS_ADDITIVE_TOLERANCE = 0.0000001; -const double GlobalConfiguration::BASIC_COSTS_MULTIPLICATIVE_TOLERANCE = 0.001 * 0.0000001; -const double GlobalConfiguration::SPARSE_FORREST_TOMLIN_DIAGONAL_ELEMENT_TOLERANCE = 0.00001; -const unsigned GlobalConfiguration::DEGRADATION_CHECKING_FREQUENCY = 100; -const double GlobalConfiguration::DEGRADATION_THRESHOLD = 0.1; -const double GlobalConfiguration::ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD = 0.0001; -const bool GlobalConfiguration::USE_COLUMN_MERGING_EQUATIONS = false; -const double GlobalConfiguration::GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD = 0.1; -const unsigned GlobalConfiguration::MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS = 5; -const DivideStrategy GlobalConfiguration::SPLITTING_HEURISTICS = DivideStrategy::ReLUViolation; -const unsigned GlobalConfiguration::INTERVAL_SPLITTING_FREQUENCY = 10; -const unsigned GlobalConfiguration::INTERVAL_SPLITTING_THRESHOLD = 10; -const unsigned GlobalConfiguration::BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY = 100; -const unsigned GlobalConfiguration::ROW_BOUND_TIGHTENER_SATURATION_ITERATIONS = 20; -const double GlobalConfiguration::COST_FUNCTION_ERROR_THRESHOLD = 0.0000000001; - -const unsigned GlobalConfiguration::SIMULATION_RANDOM_SEED = 1; -const unsigned GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED = 1; -const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED = 1; -const unsigned GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS = 1000; -const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS = 100; -const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE = 0.1; -const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE = 0.05; - -const bool GlobalConfiguration::USE_HARRIS_RATIO_TEST = true; - -const double GlobalConfiguration::SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT = 0.00000000001; -const double GlobalConfiguration::LP_TIGHTENING_ROUNDING_CONSTANT = 0.00000001; - -const double GlobalConfiguration::SIGMOID_CUTOFF_CONSTANT = 20; - -const bool GlobalConfiguration::PREPROCESS_INPUT_QUERY = true; -const bool GlobalConfiguration::PREPROCESSOR_ELIMINATE_VARIABLES = true; -const bool GlobalConfiguration::PL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING = true; 
-const bool GlobalConfiguration::NL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING = true; -const double GlobalConfiguration::PREPROCESSOR_ALMOST_FIXED_THRESHOLD = 0.00001; - -const unsigned GlobalConfiguration::PREPROCESSSING_MAX_TIGHTEING_ROUND = 10; - -const bool GlobalConfiguration::WARM_START = false; - -const unsigned GlobalConfiguration::MAX_ITERATIONS_WITHOUT_PROGRESS = 10000; - -const unsigned GlobalConfiguration::PSE_ITERATIONS_BEFORE_RESET = 1000; -const double GlobalConfiguration::PSE_GAMMA_ERROR_THRESHOLD = 0.001; -const double GlobalConfiguration::PSE_GAMMA_UPDATE_TOLERANCE = 0.000000001; - -const double GlobalConfiguration::CONSTRAINT_COMPARISON_TOLERANCE = 0.00001; - -const double GlobalConfiguration::SOFTMAX_LSE2_THRESHOLD = 0.6; - -const bool GlobalConfiguration::ONLY_AUX_INITIAL_BASIS = false; - -const GlobalConfiguration::ExplicitBasisBoundTighteningType - GlobalConfiguration::EXPLICIT_BASIS_BOUND_TIGHTENING_TYPE = - GlobalConfiguration::COMPUTE_INVERTED_BASIS_MATRIX; -const bool GlobalConfiguration::EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION = false; -const double GlobalConfiguration::EXPLICIT_BASIS_BOUND_TIGHTENING_ROUNDING_CONSTANT = 1e-6; - -const unsigned GlobalConfiguration::REFACTORIZATION_THRESHOLD = 100; -const GlobalConfiguration::BasisFactorizationType GlobalConfiguration::BASIS_FACTORIZATION_TYPE = - GlobalConfiguration::SPARSE_FORREST_TOMLIN_FACTORIZATION; - -const unsigned GlobalConfiguration::POLARITY_CANDIDATES_THRESHOLD = 5; - -const unsigned GlobalConfiguration::DNC_DEPTH_THRESHOLD = 5; - -const double GlobalConfiguration::MINIMAL_COEFFICIENT_FOR_TIGHTENING = 0.01; -const double GlobalConfiguration::LEMMA_CERTIFICATION_TOLERANCE = 0.000001; -const bool GlobalConfiguration::WRITE_JSON_PROOF = false; - -const unsigned GlobalConfiguration::BACKWARD_BOUND_PROPAGATION_DEPTH = 3; -const unsigned GlobalConfiguration::MAX_ROUNDS_OF_BACKWARD_ANALYSIS = 10; - -#ifdef ENABLE_GUROBI -const unsigned GlobalConfiguration::GUROBI_NUMBER_OF_THREADS = 1; -const bool GlobalConfiguration::GUROBI_LOGGING = false; -#endif // ENABLE_GUROBI - -// Logging - note that it is enabled only in Debug mode -const bool GlobalConfiguration::DNC_MANAGER_LOGGING = false; -const bool GlobalConfiguration::ENGINE_LOGGING = false; -const bool GlobalConfiguration::TABLEAU_LOGGING = false; -const bool GlobalConfiguration::SMT_CORE_LOGGING = false; -const bool GlobalConfiguration::DANTZIGS_RULE_LOGGING = false; -const bool GlobalConfiguration::BASIS_FACTORIZATION_LOGGING = false; -const bool GlobalConfiguration::PREPROCESSOR_LOGGING = false; -const bool GlobalConfiguration::INPUT_QUERY_LOGGING = false; -const bool GlobalConfiguration::PROJECTED_STEEPEST_EDGE_LOGGING = false; -const bool GlobalConfiguration::GAUSSIAN_ELIMINATION_LOGGING = false; -const bool GlobalConfiguration::QUERY_LOADER_LOGGING = false; -const bool GlobalConfiguration::SYMBOLIC_BOUND_TIGHTENER_LOGGING = false; -const bool GlobalConfiguration::NETWORK_LEVEL_REASONER_LOGGING = false; -const bool GlobalConfiguration::MPS_PARSER_LOGGING = false; -const bool GlobalConfiguration::ONNX_PARSER_LOGGING = false; -const bool GlobalConfiguration::SOI_LOGGING = false; -const bool GlobalConfiguration::SCORE_TRACKER_LOGGING = false; -const bool GlobalConfiguration::CEGAR_LOGGING = false; - -const bool GlobalConfiguration::USE_SMART_FIX = false; -const bool GlobalConfiguration::USE_LEAST_FIX = false; - -void GlobalConfiguration::print() -{ - printf( "****************************\n" ); - printf( "*** Global Configuraiton ***\n" ); - 
printf( "****************************\n" ); - printf( " DEFAULT_EPSILON_FOR_COMPARISONS: %.15lf\n", DEFAULT_EPSILON_FOR_COMPARISONS ); - printf( " DEFAULT_DOUBLE_TO_STRING_PRECISION: %u\n", DEFAULT_DOUBLE_TO_STRING_PRECISION ); - printf( " STATISTICS_PRINTING_FREQUENCY: %u\n", STATISTICS_PRINTING_FREQUENCY ); - printf( " BOUND_COMPARISON_ADDITIVE_TOLERANCE: %.15lf\n", - BOUND_COMPARISON_ADDITIVE_TOLERANCE ); - printf( " BOUND_COMPARISON_MULTIPLICATIVE_TOLERANCE: %.15lf\n", - BOUND_COMPARISON_MULTIPLICATIVE_TOLERANCE ); - printf( " PIVOT_CHANGE_COLUMN_TOLERANCE: %.15lf\n", PIVOT_CHANGE_COLUMN_TOLERANCE ); - printf( " RATIO_CONSTRAINT_ADDITIVE_TOLERANCE: %.15lf\n", - RATIO_CONSTRAINT_ADDITIVE_TOLERANCE ); - printf( " RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE: %.15lf\n", - RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE ); - printf( " BASIC_COSTS_ADDITIVE_TOLERANCE: %.15lf\n", BASIC_COSTS_ADDITIVE_TOLERANCE ); - printf( " BASIC_COSTS_MULTIPLICATIVE_TOLERANCE: %.15lf\n", - BASIC_COSTS_MULTIPLICATIVE_TOLERANCE ); - printf( " DEGRADATION_CHECKING_FREQUENCY: %u\n", DEGRADATION_CHECKING_FREQUENCY ); - printf( " DEGRADATION_THRESHOLD: %.15lf\n", DEGRADATION_THRESHOLD ); - printf( " ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD: %.15lf\n", ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD ); - printf( " USE_COLUMN_MERGING_EQUATIONS: %s\n", USE_COLUMN_MERGING_EQUATIONS ? "Yes" : "No" ); - printf( " GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD: %.15lf\n", - GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD ); - printf( " MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS: %u\n", MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS ); - printf( " BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY: %u\n", - BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY ); - printf( " COST_FUNCTION_ERROR_THRESHOLD: %.15lf\n", COST_FUNCTION_ERROR_THRESHOLD ); - printf( " USE_HARRIS_RATIO_TEST: %s\n", USE_HARRIS_RATIO_TEST ? "Yes" : "No" ); - - printf( " PREPROCESS_INPUT_QUERY: %s\n", PREPROCESS_INPUT_QUERY ? "Yes" : "No" ); - printf( " PREPROCESSOR_ELIMINATE_VARIABLES: %s\n", - PREPROCESSOR_ELIMINATE_VARIABLES ? "Yes" : "No" ); - printf( " PSE_ITERATIONS_BEFORE_RESET: %u\n", PSE_ITERATIONS_BEFORE_RESET ); - printf( " PSE_GAMMA_ERROR_THRESHOLD: %.15lf\n", PSE_GAMMA_ERROR_THRESHOLD ); - printf( " CONSTRAINT_COMPARISON_TOLERANCE: %.15lf\n", CONSTRAINT_COMPARISON_TOLERANCE ); - - String basisBoundTighteningType; - switch ( EXPLICIT_BASIS_BOUND_TIGHTENING_TYPE ) - { - case COMPUTE_INVERTED_BASIS_MATRIX: - basisBoundTighteningType = "Compute inverted basis matrix"; - break; - - case USE_IMPLICIT_INVERTED_BASIS_MATRIX: - basisBoundTighteningType = "Use implicit inverted basis matrix"; - break; - - default: - basisBoundTighteningType = "Unknown"; - break; - } - - printf( " EXPLICIT_BASIS_BOUND_TIGHTENING_INVERT_BASIS: %s\n", - basisBoundTighteningType.ascii() ); - printf( " EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION: %s\n", - EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION ? 
"Yes" : "No" ); - printf( " REFACTORIZATION_THRESHOLD: %u\n", REFACTORIZATION_THRESHOLD ); - - String basisFactorizationType; - if ( GlobalConfiguration::BASIS_FACTORIZATION_TYPE == GlobalConfiguration::LU_FACTORIZATION ) - basisFactorizationType = "LU_FACTORIZATION"; - else if ( GlobalConfiguration::BASIS_FACTORIZATION_TYPE == - GlobalConfiguration::SPARSE_LU_FACTORIZATION ) - basisFactorizationType = "SPARSE_LU_FACTORIZATION"; - else if ( GlobalConfiguration::BASIS_FACTORIZATION_TYPE == - GlobalConfiguration::FORREST_TOMLIN_FACTORIZATION ) - basisFactorizationType = "FORREST_TOMLIN_FACTORIZATION"; - else - basisFactorizationType = "Unknown"; - - printf( " BASIS_FACTORIZATION_TYPE: %s\n", basisFactorizationType.ascii() ); - printf( "****************************\n" ); -} - -// -// Local Variables: -// compile-command: "make -C ../.. " -// tags-file-name: "../../TAGS" -// c-basic-offset: 4 -// End: -// From dea156a2e86159a90b57431f7d4ba114365ec379 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 12 Dec 2024 20:58:07 +0200 Subject: [PATCH 33/81] Delete src/nlr/Engine.cpp --- src/nlr/Engine.cpp | 3813 -------------------------------------------- 1 file changed, 3813 deletions(-) delete mode 100644 src/nlr/Engine.cpp diff --git a/src/nlr/Engine.cpp b/src/nlr/Engine.cpp deleted file mode 100644 index e137c3cbb9..0000000000 --- a/src/nlr/Engine.cpp +++ /dev/null @@ -1,3813 +0,0 @@ -/********************* */ -/*! \file Engine.cpp - ** \verbatim - ** Top contributors (to current version): - ** Guy Katz, Duligur Ibeling, Andrew Wu, Omri Isac - ** This file is part of the Marabou project. - ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS - ** in the top-level source directory) and their institutional affiliations. - ** All rights reserved. 
See the file COPYING in the top-level source - ** directory for licensing information.\endverbatim - ** - ** \brief [[ Add one-line brief description here ]] - ** - ** [[ Add lengthier description here ]] - **/ - -#include "Engine.h" - -#include "AutoConstraintMatrixAnalyzer.h" -#include "Debug.h" -#include "DisjunctionConstraint.h" -#include "EngineState.h" -#include "InfeasibleQueryException.h" -#include "MStringf.h" -#include "MalformedBasisException.h" -#include "MarabouError.h" -#include "NLRError.h" -#include "PiecewiseLinearConstraint.h" -#include "Preprocessor.h" -#include "Query.h" -#include "TableauRow.h" -#include "TimeUtils.h" -#include "VariableOutOfBoundDuringOptimizationException.h" -#include "Vector.h" - -#include - -Engine::Engine() - : _context() - , _boundManager( _context ) - , _tableau( _boundManager ) - , _preprocessedQuery( nullptr ) - , _rowBoundTightener( *_tableau ) - , _smtCore( this ) - , _numPlConstraintsDisabledByValidSplits( 0 ) - , _preprocessingEnabled( false ) - , _initialStateStored( false ) - , _work( NULL ) - , _basisRestorationRequired( Engine::RESTORATION_NOT_NEEDED ) - , _basisRestorationPerformed( Engine::NO_RESTORATION_PERFORMED ) - , _costFunctionManager( _tableau ) - , _quitRequested( false ) - , _exitCode( Engine::NOT_DONE ) - , _numVisitedStatesAtPreviousRestoration( 0 ) - , _networkLevelReasoner( NULL ) - , _verbosity( Options::get()->getInt( Options::VERBOSITY ) ) - , _lastNumVisitedStates( 0 ) - , _lastIterationWithProgress( 0 ) - , _symbolicBoundTighteningType( Options::get()->getSymbolicBoundTighteningType() ) - , _solveWithMILP( Options::get()->getBool( Options::SOLVE_WITH_MILP ) ) - , _lpSolverType( Options::get()->getLPSolverType() ) - , _gurobi( nullptr ) - , _milpEncoder( nullptr ) - , _soiManager( nullptr ) - , _simulationSize( Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ) ) - , _isGurobyEnabled( Options::get()->gurobiEnabled() ) - , _performLpTighteningAfterSplit( - Options::get()->getBool( Options::PERFORM_LP_TIGHTENING_AFTER_SPLIT ) ) - , _milpSolverBoundTighteningType( Options::get()->getMILPSolverBoundTighteningType() ) - , _sncMode( false ) - , _queryId( "" ) - , _produceUNSATProofs( Options::get()->getBool( Options::PRODUCE_PROOFS ) ) - , _groundBoundManager( _context ) - , _UNSATCertificate( NULL ) -{ - _smtCore.setStatistics( &_statistics ); - _tableau->setStatistics( &_statistics ); - _rowBoundTightener->setStatistics( &_statistics ); - _preprocessor.setStatistics( &_statistics ); - - _activeEntryStrategy = _projectedSteepestEdgeRule; - _activeEntryStrategy->setStatistics( &_statistics ); - _statistics.stampStartingTime(); - setRandomSeed( Options::get()->getInt( Options::SEED ) ); - - _boundManager.registerEngine( this ); - _groundBoundManager.registerEngine( this ); - _statisticsPrintingFrequency = ( _lpSolverType == LPSolverType::NATIVE ) - ? GlobalConfiguration::STATISTICS_PRINTING_FREQUENCY - : GlobalConfiguration::STATISTICS_PRINTING_FREQUENCY_GUROBI; - - _UNSATCertificateCurrentPointer = - _produceUNSATProofs ? 
new ( true ) - CVC4::context::CDO( &_context, NULL ) - : NULL; -} - -Engine::~Engine() -{ - if ( _work ) - { - delete[] _work; - _work = NULL; - } - - if ( _UNSATCertificate ) - { - delete _UNSATCertificate; - _UNSATCertificate = NULL; - } - - if ( _produceUNSATProofs && _UNSATCertificateCurrentPointer ) - _UNSATCertificateCurrentPointer->deleteSelf(); -} - -void Engine::setVerbosity( unsigned verbosity ) -{ - _verbosity = verbosity; -} - -void Engine::adjustWorkMemorySize() -{ - if ( _work ) - { - delete[] _work; - _work = NULL; - } - - _work = new double[_tableau->getM()]; - if ( !_work ) - throw MarabouError( MarabouError::ALLOCATION_FAILED, "Engine::work" ); -} - -void Engine::applySnCSplit( PiecewiseLinearCaseSplit sncSplit, String queryId ) -{ - _sncMode = true; - _sncSplit = sncSplit; - _queryId = queryId; - preContextPushHook(); - _smtCore.pushContext(); - applySplit( sncSplit ); - _boundManager.propagateTightenings(); -} - -bool Engine::inSnCMode() const -{ - return _sncMode; -} - -void Engine::setRandomSeed( unsigned seed ) -{ - srand( seed ); -} - -Query Engine::prepareSnCQuery() -{ - List bounds = _sncSplit.getBoundTightenings(); - List equations = _sncSplit.getEquations(); - - Query sncIPQ = *_preprocessedQuery; - for ( auto &equation : equations ) - sncIPQ.addEquation( equation ); - - for ( auto &bound : bounds ) - { - switch ( bound._type ) - { - case Tightening::LB: - sncIPQ.setLowerBound( bound._variable, bound._value ); - break; - - case Tightening::UB: - sncIPQ.setUpperBound( bound._variable, bound._value ); - } - } - - return sncIPQ; -} - -void Engine::exportQueryWithError( String errorMessage ) -{ - String ipqFileName = ( _queryId.length() > 0 ) ? _queryId + ".ipq" : "failedInputQuery.ipq"; - prepareSnCQuery().saveQuery( ipqFileName ); - printf( "Engine: %s!\nInput query has been saved as %s. Please attach the input query when you " - "open the issue on GitHub.\n", - errorMessage.ascii(), - ipqFileName.ascii() ); -} - -bool Engine::solve( double timeoutInSeconds ) -{ - SignalHandler::getInstance()->initialize(); - SignalHandler::getInstance()->registerClient( this ); - - // Register the boundManager with all the PL constraints - for ( auto &plConstraint : _plConstraints ) - plConstraint->registerBoundManager( &_boundManager ); - for ( auto &nlConstraint : _nlConstraints ) - nlConstraint->registerBoundManager( &_boundManager ); - - // Before encoding, make sure all valid constraints are applied. - applyAllValidConstraintCaseSplits(); - - if ( _solveWithMILP ) - return solveWithMILPEncoding( timeoutInSeconds ); - - updateDirections(); - if ( _lpSolverType == LPSolverType::NATIVE ) - storeInitialEngineState(); - else if ( _lpSolverType == LPSolverType::GUROBI ) - { - ENGINE_LOG( "Encoding convex relaxation into Gurobi..." 
); - _gurobi = std::unique_ptr( new GurobiWrapper() ); - _tableau->setGurobi( &( *_gurobi ) ); - _milpEncoder = std::unique_ptr( new MILPEncoder( *_tableau ) ); - _milpEncoder->setStatistics( &_statistics ); - _milpEncoder->encodeQuery( *_gurobi, *_preprocessedQuery, true ); - ENGINE_LOG( "Encoding convex relaxation into Gurobi - done" ); - } - - mainLoopStatistics(); - if ( _verbosity > 0 ) - { - printf( "\nEngine::solve: Initial statistics\n" ); - _statistics.print(); - printf( "\n---\n" ); - } - - bool splitJustPerformed = true; - struct timespec mainLoopStart = TimeUtils::sampleMicro(); - while ( true ) - { - struct timespec mainLoopEnd = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TIME_MAIN_LOOP_MICRO, - TimeUtils::timePassed( mainLoopStart, mainLoopEnd ) ); - mainLoopStart = mainLoopEnd; - - if ( shouldExitDueToTimeout( timeoutInSeconds ) ) - { - if ( _verbosity > 0 ) - { - printf( "\n\nEngine: quitting due to timeout...\n\n" ); - printf( "Final statistics:\n" ); - _statistics.print(); - } - - _exitCode = Engine::TIMEOUT; - _statistics.timeout(); - return false; - } - - if ( _quitRequested ) - { - if ( _verbosity > 0 ) - { - printf( "\n\nEngine: quitting due to external request...\n\n" ); - printf( "Final statistics:\n" ); - _statistics.print(); - } - - _exitCode = Engine::QUIT_REQUESTED; - return false; - } - - try - { - DEBUG( _tableau->verifyInvariants() ); - - mainLoopStatistics(); - if ( _verbosity > 1 && - _statistics.getLongAttribute( Statistics::NUM_MAIN_LOOP_ITERATIONS ) % - _statisticsPrintingFrequency == - 0 ) - _statistics.print(); - - if ( _lpSolverType == LPSolverType::NATIVE ) - { - checkOverallProgress(); - // Check whether progress has been made recently - - if ( performPrecisionRestorationIfNeeded() ) - continue; - - if ( _tableau->basisMatrixAvailable() ) - { - explicitBasisBoundTightening(); - _boundManager.propagateTightenings(); - applyAllValidConstraintCaseSplits(); - } - } - - // If true, we just entered a new subproblem - if ( splitJustPerformed ) - { - performBoundTighteningAfterCaseSplit(); - informLPSolverOfBounds(); - splitJustPerformed = false; - } - - // Perform any SmtCore-initiated case splits - if ( _smtCore.needToSplit() ) - { - _smtCore.performSplit(); - splitJustPerformed = true; - continue; - } - - if ( !_tableau->allBoundsValid() ) - { - // Some variable bounds are invalid, so the query is unsat - throw InfeasibleQueryException(); - } - - if ( allVarsWithinBounds() ) - { - // It's possible that a disjunction constraint is fixed and additional constraints - // are introduced, making the linear portion unsatisfied. So we need to make sure - // there are no valid case splits that we do not know of. - applyAllBoundTightenings(); - if ( applyAllValidConstraintCaseSplits() ) - continue; - - // The linear portion of the problem has been solved. 
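// (When GlobalConfiguration::USE_DEEPSOI_LOCAL_SEARCH is true, the check
// below delegates to performDeepSoILocalSearch(); otherwise the
// Reluplex-style constraint-fixing step is used. See
// adjustAssignmentToSatisfyNonLinearConstraints() further down.)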
- // Check the status of the PL constraints - bool solutionFound = adjustAssignmentToSatisfyNonLinearConstraints(); - if ( solutionFound ) - { - if ( allNonlinearConstraintsHold() ) - { - mainLoopEnd = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( - Statistics::TIME_MAIN_LOOP_MICRO, - TimeUtils::timePassed( mainLoopStart, mainLoopEnd ) ); - if ( _verbosity > 0 ) - { - printf( "\nEngine::solve: sat assignment found\n" ); - _statistics.print(); - } - - // Allows checking proofs produced for UNSAT leaves of satisfiable query - // search tree - if ( _produceUNSATProofs ) - { - ASSERT( _UNSATCertificateCurrentPointer ); - ( **_UNSATCertificateCurrentPointer ).setSATSolutionFlag(); - } - _exitCode = Engine::SAT; - return true; - } - else if ( !hasBranchingCandidate() ) - { - mainLoopEnd = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( - Statistics::TIME_MAIN_LOOP_MICRO, - TimeUtils::timePassed( mainLoopStart, mainLoopEnd ) ); - if ( _verbosity > 0 ) - { - printf( "\nEngine::solve: at leaf node but solving inconclusive\n" ); - _statistics.print(); - } - _exitCode = Engine::UNKNOWN; - return false; - } - else - { - while ( !_smtCore.needToSplit() ) - _smtCore.reportRejectedPhasePatternProposal(); - continue; - } - } - else - { - continue; - } - } - - // We have out-of-bounds variables. - if ( _lpSolverType == LPSolverType::NATIVE ) - performSimplexStep(); - else - { - ENGINE_LOG( "Checking LP feasibility with Gurobi..." ); - DEBUG( { checkGurobiBoundConsistency(); } ); - ASSERT( _lpSolverType == LPSolverType::GUROBI ); - LinearExpression dontCare; - minimizeCostWithGurobi( dontCare ); - } - continue; - } - catch ( const MalformedBasisException & ) - { - _tableau->toggleOptimization( false ); - if ( !handleMalformedBasisException() ) - { - ASSERT( _lpSolverType == LPSolverType::NATIVE ); - _exitCode = Engine::ERROR; - exportQueryWithError( "Cannot restore tableau" ); - mainLoopEnd = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TIME_MAIN_LOOP_MICRO, - TimeUtils::timePassed( mainLoopStart, mainLoopEnd ) ); - return false; - } - } - catch ( const InfeasibleQueryException & ) - { - _tableau->toggleOptimization( false ); - // The current query is unsat, and we need to pop. - // If we're at level 0, the whole query is unsat. - if ( _produceUNSATProofs ) - explainSimplexFailure(); - - if ( !_smtCore.popSplit() ) - { - mainLoopEnd = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TIME_MAIN_LOOP_MICRO, - TimeUtils::timePassed( mainLoopStart, mainLoopEnd ) ); - if ( _verbosity > 0 ) - { - printf( "\nEngine::solve: unsat query\n" ); - _statistics.print(); - } - _exitCode = Engine::UNSAT; - return false; - } - else - { - splitJustPerformed = true; - } - } - catch ( const VariableOutOfBoundDuringOptimizationException & ) - { - _tableau->toggleOptimization( false ); - continue; - } - catch ( MarabouError &e ) - { - String message = Stringf( - "Caught a MarabouError. Code: %u. Message: %s ", e.getCode(), e.getUserMessage() ); - _exitCode = Engine::ERROR; - exportQueryWithError( message ); - mainLoopEnd = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TIME_MAIN_LOOP_MICRO, - TimeUtils::timePassed( mainLoopStart, mainLoopEnd ) ); - return false; - } - catch ( ... 
) - { - _exitCode = Engine::ERROR; - exportQueryWithError( "Unknown error" ); - mainLoopEnd = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TIME_MAIN_LOOP_MICRO, - TimeUtils::timePassed( mainLoopStart, mainLoopEnd ) ); - return false; - } - } -} - -void Engine::mainLoopStatistics() -{ - struct timespec start = TimeUtils::sampleMicro(); - - unsigned activeConstraints = 0; - for ( const auto &constraint : _plConstraints ) - if ( constraint->isActive() ) - ++activeConstraints; - - _statistics.setUnsignedAttribute( Statistics::NUM_ACTIVE_PL_CONSTRAINTS, activeConstraints ); - _statistics.setUnsignedAttribute( Statistics::NUM_PL_VALID_SPLITS, - _numPlConstraintsDisabledByValidSplits ); - _statistics.setUnsignedAttribute( Statistics::NUM_PL_SMT_ORIGINATED_SPLITS, - _plConstraints.size() - activeConstraints - - _numPlConstraintsDisabledByValidSplits ); - - _statistics.incLongAttribute( Statistics::NUM_MAIN_LOOP_ITERATIONS ); - - struct timespec end = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TOTAL_TIME_HANDLING_STATISTICS_MICRO, - TimeUtils::timePassed( start, end ) ); -} - -void Engine::performBoundTighteningAfterCaseSplit() -{ - // Tighten bounds of a first hidden layer with MILP solver - performMILPSolverBoundedTighteningForSingleLayer( 1 ); - do - { - performSymbolicBoundTightening(); - } - while ( applyAllValidConstraintCaseSplits() ); - - // Tighten bounds of an output layer with MILP solver - if ( _networkLevelReasoner ) // to avoid failing of system test. - performMILPSolverBoundedTighteningForSingleLayer( - _networkLevelReasoner->getLayerIndexToLayer().size() - 1 ); -} - -bool Engine::adjustAssignmentToSatisfyNonLinearConstraints() -{ - ENGINE_LOG( "Linear constraints satisfied. Now trying to satisfy non-linear" - " constraints..." ); - collectViolatedPlConstraints(); - - // If all constraints are satisfied, we are possibly done - if ( allPlConstraintsHold() ) - { - if ( _lpSolverType == LPSolverType::NATIVE && - _tableau->getBasicAssignmentStatus() != ITableau::BASIC_ASSIGNMENT_JUST_COMPUTED ) - { - if ( _verbosity > 0 ) - { - printf( "Before declaring sat, recomputing...\n" ); - } - // Make sure that the assignment is precise before declaring success - _tableau->computeAssignment(); - // If we actually have a real satisfying assignment, - return false; - } - else - return true; - } - else if ( !GlobalConfiguration::USE_DEEPSOI_LOCAL_SEARCH ) - { - // We have violated piecewise-linear constraints. - performConstraintFixingStep(); - - // Finally, take this opporunity to tighten any bounds - // and perform any valid case splits. 
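// (Presumably rate-limited internally: the deleted GlobalConfiguration.cpp
// above sets BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY = 100 for full
// bound tightening on the constraint matrix.)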
- tightenBoundsOnConstraintMatrix(); - _boundManager.propagateTightenings(); - // For debugging purposes - checkBoundCompliancyWithDebugSolution(); - - while ( applyAllValidConstraintCaseSplits() ) - performSymbolicBoundTightening(); - return false; - } - else - { - return performDeepSoILocalSearch(); - } -} - -bool Engine::performPrecisionRestorationIfNeeded() -{ - // If the basis has become malformed, we need to restore it - if ( basisRestorationNeeded() ) - { - if ( _basisRestorationRequired == Engine::STRONG_RESTORATION_NEEDED ) - { - performPrecisionRestoration( PrecisionRestorer::RESTORE_BASICS ); - _basisRestorationPerformed = Engine::PERFORMED_STRONG_RESTORATION; - } - else - { - performPrecisionRestoration( PrecisionRestorer::DO_NOT_RESTORE_BASICS ); - _basisRestorationPerformed = Engine::PERFORMED_WEAK_RESTORATION; - } - - _numVisitedStatesAtPreviousRestoration = - _statistics.getUnsignedAttribute( Statistics::NUM_VISITED_TREE_STATES ); - _basisRestorationRequired = Engine::RESTORATION_NOT_NEEDED; - return true; - } - - // Restoration is not required - _basisRestorationPerformed = Engine::NO_RESTORATION_PERFORMED; - - // Possible restoration due to preceision degradation - if ( shouldCheckDegradation() && highDegradation() ) - { - performPrecisionRestoration( PrecisionRestorer::RESTORE_BASICS ); - return true; - } - - return false; -} - -bool Engine::handleMalformedBasisException() -{ - // Debug - printf( "MalformedBasisException caught!\n" ); - // - - if ( _basisRestorationPerformed == Engine::NO_RESTORATION_PERFORMED ) - { - if ( _numVisitedStatesAtPreviousRestoration != - _statistics.getUnsignedAttribute( Statistics::NUM_VISITED_TREE_STATES ) ) - { - // We've tried a strong restoration before, and it didn't work. Do a weak restoration - _basisRestorationRequired = Engine::WEAK_RESTORATION_NEEDED; - } - else - { - _basisRestorationRequired = Engine::STRONG_RESTORATION_NEEDED; - } - return true; - } - else if ( _basisRestorationPerformed == Engine::PERFORMED_STRONG_RESTORATION ) - { - _basisRestorationRequired = Engine::WEAK_RESTORATION_NEEDED; - return true; - } - else - return false; -} - -void Engine::performConstraintFixingStep() -{ - // Statistics - _statistics.incLongAttribute( Statistics::NUM_CONSTRAINT_FIXING_STEPS ); - struct timespec start = TimeUtils::sampleMicro(); - - // Select a violated constraint as the target - selectViolatedPlConstraint(); - - // Report the violated constraint to the SMT engine - reportPlViolation(); - - // Attempt to fix the constraint - fixViolatedPlConstraintIfPossible(); - - struct timespec end = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TIME_CONSTRAINT_FIXING_STEPS_MICRO, - TimeUtils::timePassed( start, end ) ); -} - -bool Engine::performSimplexStep() -{ - // Statistics - _statistics.incLongAttribute( Statistics::NUM_SIMPLEX_STEPS ); - struct timespec start = TimeUtils::sampleMicro(); - - /* - In order to increase numerical stability, we attempt to pick a - "good" entering/leaving combination, by trying to avoid tiny pivot - values. We do this as follows: - - 1. Pick an entering variable according to the strategy in use. - 2. Find the entailed leaving variable. - 3. If the combination is bad, go back to (1) and find the - next-best entering variable. 
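      For illustration, with the values from the deleted
      GlobalConfiguration.cpp above (ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD =
      0.0001, MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS = 5): a candidate whose
      change-column entry at the leaving index is, say, 1e-6 is remembered
      as the best pivot seen so far, but the search continues, examining up
      to 5 candidates in total before settling for the largest pivot found.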
- */ - - if ( _tableau->isOptimizing() ) - _costFunctionManager->computeGivenCostFunction( _heuristicCost._addends ); - if ( _costFunctionManager->costFunctionInvalid() ) - _costFunctionManager->computeCoreCostFunction(); - else - _costFunctionManager->adjustBasicCostAccuracy(); - - DEBUG( { - // Since we're performing a simplex step, there are out-of-bounds variables. - // Therefore, if the cost function is fresh, it should not be zero. - if ( _costFunctionManager->costFunctionJustComputed() ) - { - const double *costFunction = _costFunctionManager->getCostFunction(); - unsigned size = _tableau->getN() - _tableau->getM(); - bool found = false; - for ( unsigned i = 0; i < size; ++i ) - { - if ( !FloatUtils::isZero( costFunction[i] ) ) - { - found = true; - break; - } - } - - if ( !found ) - { - printf( "Error! Have OOB vars but cost function is zero.\n" - "Recomputing cost function. New one is:\n" ); - _costFunctionManager->computeCoreCostFunction(); - _costFunctionManager->dumpCostFunction(); - throw MarabouError( MarabouError::DEBUGGING_ERROR, - "Have OOB vars but cost function is zero" ); - } - } - } ); - - // Obtain all eligible entering variables - List enteringVariableCandidates; - _tableau->getEntryCandidates( enteringVariableCandidates ); - - unsigned bestLeaving = 0; - double bestChangeRatio = 0.0; - Set excludedEnteringVariables; - bool haveCandidate = false; - unsigned bestEntering = 0; - double bestPivotEntry = 0.0; - unsigned tries = GlobalConfiguration::MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS; - - while ( tries > 0 ) - { - --tries; - - // Attempt to pick the best entering variable from the available candidates - if ( !_activeEntryStrategy->select( - _tableau, enteringVariableCandidates, excludedEnteringVariables ) ) - { - // No additional candidates can be found. - break; - } - - // We have a candidate! - haveCandidate = true; - - // We don't want to re-consider this candidate in future - // iterations - excludedEnteringVariables.insert( _tableau->getEnteringVariableIndex() ); - - // Pick a leaving variable - _tableau->computeChangeColumn(); - _tableau->pickLeavingVariable(); - - // A fake pivot always wins - if ( _tableau->performingFakePivot() ) - { - bestEntering = _tableau->getEnteringVariableIndex(); - bestLeaving = _tableau->getLeavingVariableIndex(); - bestChangeRatio = _tableau->getChangeRatio(); - memcpy( _work, _tableau->getChangeColumn(), sizeof( double ) * _tableau->getM() ); - break; - } - - // Is the newly found pivot better than the stored one? - unsigned leavingIndex = _tableau->getLeavingVariableIndex(); - double pivotEntry = FloatUtils::abs( _tableau->getChangeColumn()[leavingIndex] ); - if ( pivotEntry > bestPivotEntry ) - { - bestEntering = _tableau->getEnteringVariableIndex(); - bestPivotEntry = pivotEntry; - bestLeaving = leavingIndex; - bestChangeRatio = _tableau->getChangeRatio(); - memcpy( _work, _tableau->getChangeColumn(), sizeof( double ) * _tableau->getM() ); - } - - // If the pivot is greater than the sought-after threshold, we - // are done. - if ( bestPivotEntry >= GlobalConfiguration::ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD ) - break; - else - _statistics.incLongAttribute( - Statistics::NUM_SIMPLEX_PIVOT_SELECTIONS_IGNORED_FOR_STABILITY ); - } - - // If we don't have any candidates, this simplex step has failed. - if ( !haveCandidate ) - { - if ( _tableau->getBasicAssignmentStatus() != ITableau::BASIC_ASSIGNMENT_JUST_COMPUTED ) - { - // This failure might have resulted from a corrupt basic assignment. 
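// (Recomputing the assignment refreshes the basic variables' values, so
// the next main-loop iteration retries the simplex step on accurate data.)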
- _tableau->computeAssignment(); - struct timespec end = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TIME_SIMPLEX_STEPS_MICRO, - TimeUtils::timePassed( start, end ) ); - return false; - } - else if ( !_costFunctionManager->costFunctionJustComputed() ) - { - // This failure might have resulted from a corrupt cost function. - ASSERT( _costFunctionManager->getCostFunctionStatus() == - ICostFunctionManager::COST_FUNCTION_UPDATED ); - _costFunctionManager->invalidateCostFunction(); - struct timespec end = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TIME_SIMPLEX_STEPS_MICRO, - TimeUtils::timePassed( start, end ) ); - return false; - } - else - { - // Cost function is fresh --- failure is real. - struct timespec end = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TIME_SIMPLEX_STEPS_MICRO, - TimeUtils::timePassed( start, end ) ); - if ( _tableau->isOptimizing() ) - { - // The current solution is optimal. - return true; - } - else - throw InfeasibleQueryException(); - } - } - - // Set the best choice in the tableau - _tableau->setEnteringVariableIndex( bestEntering ); - _tableau->setLeavingVariableIndex( bestLeaving ); - _tableau->setChangeColumn( _work ); - _tableau->setChangeRatio( bestChangeRatio ); - - bool fakePivot = _tableau->performingFakePivot(); - - if ( !fakePivot && bestPivotEntry < GlobalConfiguration::ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD ) - { - /* - Despite our efforts, we are stuck with a small pivot. If basis factorization - isn't fresh, refresh it and terminate this step - perhaps in the next iteration - a better pivot will be found - */ - if ( !_tableau->basisMatrixAvailable() ) - { - _tableau->refreshBasisFactorization(); - return false; - } - - _statistics.incLongAttribute( Statistics::NUM_SIMPLEX_UNSTABLE_PIVOTS ); - } - - if ( !fakePivot ) - { - _tableau->computePivotRow(); - _rowBoundTightener->examinePivotRow(); - } - - // Perform the actual pivot - _activeEntryStrategy->prePivotHook( _tableau, fakePivot ); - _tableau->performPivot(); - _activeEntryStrategy->postPivotHook( _tableau, fakePivot ); - _boundManager.propagateTightenings(); - _costFunctionManager->invalidateCostFunction(); - - struct timespec end = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TIME_SIMPLEX_STEPS_MICRO, - TimeUtils::timePassed( start, end ) ); - return false; -} - -void Engine::fixViolatedPlConstraintIfPossible() -{ - List fixes; - - if ( GlobalConfiguration::USE_SMART_FIX ) - fixes = _plConstraintToFix->getSmartFixes( _tableau ); - else - fixes = _plConstraintToFix->getPossibleFixes(); - - // First, see if we can fix without pivoting. We are looking for a fix concerning a - // non-basic variable, that doesn't set that variable out-of-bounds. - for ( const auto &fix : fixes ) - { - if ( !_tableau->isBasic( fix._variable ) ) - { - if ( _tableau->checkValueWithinBounds( fix._variable, fix._value ) ) - { - _tableau->setNonBasicAssignment( fix._variable, fix._value, true ); - return; - } - } - } - - // No choice, have to pivot. Look for a fix concerning a basic variable, that - // doesn't set that variable out-of-bounds. If smart-fix is enabled and implemented, - // we should probably not reach this point. 
- bool found = false; - auto it = fixes.begin(); - while ( !found && it != fixes.end() ) - { - if ( _tableau->isBasic( it->_variable ) ) - { - if ( _tableau->checkValueWithinBounds( it->_variable, it->_value ) ) - { - found = true; - } - } - if ( !found ) - { - ++it; - } - } - - // If we couldn't find an eligible fix, give up - if ( !found ) - return; - - PiecewiseLinearConstraint::Fix fix = *it; - ASSERT( _tableau->isBasic( fix._variable ) ); - - TableauRow row( _tableau->getN() - _tableau->getM() ); - _tableau->getTableauRow( _tableau->variableToIndex( fix._variable ), &row ); - - // Pick the variable with the largest coefficient in this row for pivoting, - // to increase numerical stability. - unsigned bestCandidate = row._row[0]._var; - double bestValue = FloatUtils::abs( row._row[0]._coefficient ); - - unsigned n = _tableau->getN(); - unsigned m = _tableau->getM(); - for ( unsigned i = 1; i < n - m; ++i ) - { - double contenderValue = FloatUtils::abs( row._row[i]._coefficient ); - if ( FloatUtils::gt( contenderValue, bestValue ) ) - { - bestValue = contenderValue; - bestCandidate = row._row[i]._var; - } - } - - if ( FloatUtils::isZero( bestValue ) ) - { - // This can happen, e.g., if we have an equation x = 5, and is legal behavior. - return; - } - - // Switch between nonBasic and the variable we need to fix - _tableau->setEnteringVariableIndex( _tableau->variableToIndex( bestCandidate ) ); - _tableau->setLeavingVariableIndex( _tableau->variableToIndex( fix._variable ) ); - - // Make sure the change column and pivot row are up-to-date - strategies - // such as projected steepest edge need these for their internal updates. - _tableau->computeChangeColumn(); - _tableau->computePivotRow(); - - _activeEntryStrategy->prePivotHook( _tableau, false ); - _tableau->performDegeneratePivot(); - _activeEntryStrategy->postPivotHook( _tableau, false ); - - ASSERT( !_tableau->isBasic( fix._variable ) ); - _tableau->setNonBasicAssignment( fix._variable, fix._value, true ); -} - -bool Engine::processInputQuery( const IQuery &inputQuery ) -{ - return processInputQuery( inputQuery, GlobalConfiguration::PREPROCESS_INPUT_QUERY ); -} - -bool Engine::calculateBounds( const IQuery &inputQuery ) -{ - ENGINE_LOG( "calculateBounds starting\n" ); - struct timespec start = TimeUtils::sampleMicro(); - - try - { - invokePreprocessor( inputQuery, true ); - if ( _verbosity > 1 ) - printInputBounds( inputQuery ); - - initializeNetworkLevelReasoning(); - - performSymbolicBoundTightening( &( *_preprocessedQuery ) ); - performSimulation(); - performMILPSolverBoundedTightening( &( *_preprocessedQuery ) ); - performAdditionalBackwardAnalysisIfNeeded(); - - if ( _networkLevelReasoner && Options::get()->getBool( Options::DUMP_BOUNDS ) ) - _networkLevelReasoner->dumpBounds(); - - struct timespec end = TimeUtils::sampleMicro(); - _statistics.setLongAttribute( Statistics::CALCULATE_BOUNDS_TIME_MICRO, - TimeUtils::timePassed( start, end ) ); - if ( !_tableau->allBoundsValid() ) - { - // Some variable bounds are invalid, so the query is unsat - throw InfeasibleQueryException(); - } - } - catch ( const InfeasibleQueryException & ) - { - ENGINE_LOG( "calculateBounds done\n" ); - - struct timespec end = TimeUtils::sampleMicro(); - _statistics.setLongAttribute( Statistics::CALCULATE_BOUNDS_TIME_MICRO, - TimeUtils::timePassed( start, end ) ); - - _exitCode = Engine::UNSAT; - printf( "unsat\n" ); - - return false; - } - - ENGINE_LOG( "calculateBounds done\n" ); - - return true; -} - -void Engine::invokePreprocessor( const IQuery 
&inputQuery, bool preprocess ) -{ - if ( _verbosity > 0 ) - printf( "Engine::processInputQuery: Input query (before preprocessing): " - "%u equations, %u variables\n", - inputQuery.getNumberOfEquations(), - inputQuery.getNumberOfVariables() ); - - // If processing is enabled, invoke the preprocessor - _preprocessingEnabled = preprocess; - if ( _preprocessingEnabled ) - _preprocessedQuery = _preprocessor.preprocess( - inputQuery, GlobalConfiguration::PREPROCESSOR_ELIMINATE_VARIABLES ); - else - { - _preprocessedQuery = std::unique_ptr( inputQuery.generateQuery() ); - Preprocessor().informConstraintsOfInitialBounds( *_preprocessedQuery ); - } - - if ( _verbosity > 0 ) - printf( "Engine::processInputQuery: Input query (after preprocessing): " - "%u equations, %u variables\n\n", - _preprocessedQuery->getNumberOfEquations(), - _preprocessedQuery->getNumberOfVariables() ); - - unsigned infiniteBounds = _preprocessedQuery->countInfiniteBounds(); - if ( infiniteBounds != 0 ) - { - _exitCode = Engine::ERROR; - throw MarabouError( MarabouError::UNBOUNDED_VARIABLES_NOT_YET_SUPPORTED, - Stringf( "Error! Have %u infinite bounds", infiniteBounds ).ascii() ); - } -} - -void Engine::printInputBounds( const IQuery &inputQuery ) const -{ - printf( "Input bounds:\n" ); - for ( unsigned i = 0; i < inputQuery.getNumInputVariables(); ++i ) - { - unsigned variable = inputQuery.inputVariableByIndex( i ); - double lb, ub; - bool fixed = false; - if ( _preprocessingEnabled ) - { - // Fixed variables are easy: return the value they've been fixed to. - if ( _preprocessor.variableIsFixed( variable ) ) - { - fixed = true; - lb = _preprocessor.getFixedValue( variable ); - ub = lb; - } - else - { - // Has the variable been merged into another? - while ( _preprocessor.variableIsMerged( variable ) ) - variable = _preprocessor.getMergedIndex( variable ); - - // We know which variable to look for, but it may have been assigned - // a new index, due to variable elimination - variable = _preprocessor.getNewIndex( variable ); - - lb = _preprocessedQuery->getLowerBound( variable ); - ub = _preprocessedQuery->getUpperBound( variable ); - } - } - else - { - lb = inputQuery.getLowerBound( variable ); - ub = inputQuery.getUpperBound( variable ); - } - - printf( "\tx%u: [%8.4lf, %8.4lf] %s\n", i, lb, ub, fixed ? 
"[FIXED]" : "" ); - } - printf( "\n" ); -} - -void Engine::storeEquationsInDegradationChecker() -{ - _degradationChecker.storeEquations( *_preprocessedQuery ); -} - -double *Engine::createConstraintMatrix() -{ - const List &equations( _preprocessedQuery->getEquations() ); - unsigned m = equations.size(); - unsigned n = _preprocessedQuery->getNumberOfVariables(); - - // Step 1: create a constraint matrix from the equations - double *constraintMatrix = new double[n * m]; - if ( !constraintMatrix ) - throw MarabouError( MarabouError::ALLOCATION_FAILED, "Engine::constraintMatrix" ); - std::fill_n( constraintMatrix, n * m, 0.0 ); - - unsigned equationIndex = 0; - for ( const auto &equation : equations ) - { - if ( equation._type != Equation::EQ ) - { - _exitCode = Engine::ERROR; - throw MarabouError( MarabouError::NON_EQUALITY_INPUT_EQUATION_DISCOVERED ); - } - - for ( const auto &addend : equation._addends ) - constraintMatrix[equationIndex * n + addend._variable] = addend._coefficient; - - ++equationIndex; - } - - return constraintMatrix; -} - -void Engine::removeRedundantEquations( const double *constraintMatrix ) -{ - const List &equations( _preprocessedQuery->getEquations() ); - unsigned m = equations.size(); - unsigned n = _preprocessedQuery->getNumberOfVariables(); - - // Step 1: analyze the matrix to identify redundant rows - AutoConstraintMatrixAnalyzer analyzer; - analyzer->analyze( constraintMatrix, m, n ); - - ENGINE_LOG( - Stringf( "Number of redundant rows: %u out of %u", analyzer->getRedundantRows().size(), m ) - .ascii() ); - - // Step 2: remove any equations corresponding to redundant rows - Set redundantRows = analyzer->getRedundantRows(); - - if ( !redundantRows.empty() ) - { - _preprocessedQuery->removeEquationsByIndex( redundantRows ); - m = equations.size(); - } -} - -void Engine::selectInitialVariablesForBasis( const double *constraintMatrix, - List &initialBasis, - List &basicRows ) -{ - /* - This method permutes rows and columns in the constraint matrix (prior - to the addition of auxiliary variables), in order to obtain a set of - column that constitue a lower triangular matrix. The variables - corresponding to the columns of this matrix join the initial basis. - - (It is possible that not enough variables are obtained this way, in which - case the initial basis will have to be augmented later). 
- */ - - const List &equations( _preprocessedQuery->getEquations() ); - - unsigned m = equations.size(); - unsigned n = _preprocessedQuery->getNumberOfVariables(); - - // Trivial case, or if a trivial basis is requested - if ( ( m == 0 ) || ( n == 0 ) || GlobalConfiguration::ONLY_AUX_INITIAL_BASIS ) - { - for ( unsigned i = 0; i < m; ++i ) - basicRows.append( i ); - - return; - } - - unsigned *nnzInRow = new unsigned[m]; - unsigned *nnzInColumn = new unsigned[n]; - - std::fill_n( nnzInRow, m, 0 ); - std::fill_n( nnzInColumn, n, 0 ); - - unsigned *columnOrdering = new unsigned[n]; - unsigned *rowOrdering = new unsigned[m]; - - for ( unsigned i = 0; i < m; ++i ) - rowOrdering[i] = i; - - for ( unsigned i = 0; i < n; ++i ) - columnOrdering[i] = i; - - // Initialize the counters - for ( unsigned i = 0; i < m; ++i ) - { - for ( unsigned j = 0; j < n; ++j ) - { - if ( !FloatUtils::isZero( constraintMatrix[i * n + j] ) ) - { - ++nnzInRow[i]; - ++nnzInColumn[j]; - } - } - } - - DEBUG( { - for ( unsigned i = 0; i < m; ++i ) - { - ASSERT( nnzInRow[i] > 0 ); - } - } ); - - unsigned numExcluded = 0; - unsigned numTriangularRows = 0; - unsigned temp; - - while ( numExcluded + numTriangularRows < n ) - { - // Do we have a singleton row? - unsigned singletonRow = m; - for ( unsigned i = numTriangularRows; i < m; ++i ) - { - if ( nnzInRow[i] == 1 ) - { - singletonRow = i; - break; - } - } - - if ( singletonRow < m ) - { - // Have a singleton row! Swap it to the top and update counters - temp = rowOrdering[singletonRow]; - rowOrdering[singletonRow] = rowOrdering[numTriangularRows]; - rowOrdering[numTriangularRows] = temp; - - temp = nnzInRow[numTriangularRows]; - nnzInRow[numTriangularRows] = nnzInRow[singletonRow]; - nnzInRow[singletonRow] = temp; - - // Find the non-zero entry in the row and swap it to the diagonal - DEBUG( bool foundNonZero = false ); - for ( unsigned i = numTriangularRows; i < n - numExcluded; ++i ) - { - if ( !FloatUtils::isZero( constraintMatrix[rowOrdering[numTriangularRows] * n + - columnOrdering[i]] ) ) - { - temp = columnOrdering[i]; - columnOrdering[i] = columnOrdering[numTriangularRows]; - columnOrdering[numTriangularRows] = temp; - - temp = nnzInColumn[numTriangularRows]; - nnzInColumn[numTriangularRows] = nnzInColumn[i]; - nnzInColumn[i] = temp; - - DEBUG( foundNonZero = true ); - break; - } - } - - ASSERT( foundNonZero ); - - // Remove all entries under the diagonal entry from the row counters - for ( unsigned i = numTriangularRows + 1; i < m; ++i ) - { - if ( !FloatUtils::isZero( constraintMatrix[rowOrdering[i] * n + - columnOrdering[numTriangularRows]] ) ) - --nnzInRow[i]; - } - - ++numTriangularRows; - } - else - { - // No singleton rows. 
Exclude the densest column
-            unsigned maxDensity = nnzInColumn[numTriangularRows];
-            unsigned column = numTriangularRows;
-
-            for ( unsigned i = numTriangularRows; i < n - numExcluded; ++i )
-            {
-                if ( nnzInColumn[i] > maxDensity )
-                {
-                    maxDensity = nnzInColumn[i];
-                    column = i;
-                }
-            }
-
-            // Update the row counters to account for the excluded column
-            for ( unsigned i = numTriangularRows; i < m; ++i )
-            {
-                double element = constraintMatrix[rowOrdering[i] * n + columnOrdering[column]];
-                if ( !FloatUtils::isZero( element ) )
-                {
-                    ASSERT( nnzInRow[i] > 1 );
-                    --nnzInRow[i];
-                }
-            }
-
-            columnOrdering[column] = columnOrdering[n - 1 - numExcluded];
-            nnzInColumn[column] = nnzInColumn[n - 1 - numExcluded];
-            ++numExcluded;
-        }
-    }
-
-    // Final basis: diagonalized columns + non-diagonalized rows
-    List<unsigned> result;
-
-    for ( unsigned i = 0; i < numTriangularRows; ++i )
-    {
-        initialBasis.append( columnOrdering[i] );
-    }
-
-    for ( unsigned i = numTriangularRows; i < m; ++i )
-    {
-        basicRows.append( rowOrdering[i] );
-    }
-
-    // Cleanup
-    delete[] nnzInRow;
-    delete[] nnzInColumn;
-    delete[] columnOrdering;
-    delete[] rowOrdering;
-}
-
-void Engine::addAuxiliaryVariables()
-{
-    List<Equation> &equations( _preprocessedQuery->getEquations() );
-
-    unsigned m = equations.size();
-    unsigned originalN = _preprocessedQuery->getNumberOfVariables();
-    unsigned n = originalN + m;
-
-    _preprocessedQuery->setNumberOfVariables( n );
-
-    // Add auxiliary variables to the equations and set their bounds
-    unsigned count = 0;
-    for ( auto &eq : equations )
-    {
-        unsigned auxVar = originalN + count;
-        if ( _produceUNSATProofs )
-            _preprocessedQuery->_lastAddendToAux.insert( eq._addends.back()._variable, auxVar );
-        eq.addAddend( -1, auxVar );
-        _preprocessedQuery->setLowerBound( auxVar, eq._scalar );
-        _preprocessedQuery->setUpperBound( auxVar, eq._scalar );
-        eq.setScalar( 0 );
-
-        ++count;
-    }
-}
-
-void Engine::augmentInitialBasisIfNeeded( List<unsigned> &initialBasis,
-                                          const List<unsigned> &basicRows )
-{
-    unsigned m = _preprocessedQuery->getEquations().size();
-    unsigned n = _preprocessedQuery->getNumberOfVariables();
-    unsigned originalN = n - m;
-
-    if ( initialBasis.size() != m )
-    {
-        for ( const auto &basicRow : basicRows )
-            initialBasis.append( basicRow + originalN );
-    }
-}
-
-void Engine::initializeTableau( const double *constraintMatrix, const List<unsigned> &initialBasis )
-{
-    const List<Equation> &equations( _preprocessedQuery->getEquations() );
-    unsigned m = equations.size();
-    unsigned n = _preprocessedQuery->getNumberOfVariables();
-
-    _tableau->setDimensions( m, n );
-
-    adjustWorkMemorySize();
-
-    unsigned equationIndex = 0;
-    for ( const auto &equation : equations )
-    {
-        _tableau->setRightHandSide( equationIndex, equation._scalar );
-        ++equationIndex;
-    }
-
-    // Populate the constraint matrix
-    _tableau->setConstraintMatrix( constraintMatrix );
-
-    _tableau->registerToWatchAllVariables( _rowBoundTightener );
-    _tableau->registerResizeWatcher( _rowBoundTightener );
-
-    _rowBoundTightener->setDimensions();
-
-    initializeBoundsAndConstraintWatchersInTableau( n );
-
-    _tableau->initializeTableau( initialBasis );
-
-    _costFunctionManager->initialize();
-    _tableau->registerCostFunctionManager( _costFunctionManager );
-    _activeEntryStrategy->initialize( _tableau );
-}
-
-void Engine::initializeBoundsAndConstraintWatchersInTableau( unsigned numberOfVariables )
-{
-    _plConstraints = _preprocessedQuery->getPiecewiseLinearConstraints();
-    for ( const auto &constraint : _plConstraints )
-    {
-        constraint->registerAsWatcher( _tableau );
-
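
/*
  An illustrative sketch (hypothetical values, not from the original source)
  of the transformation performed by addAuxiliaryVariables() above. An
  equation
      x0 + 2*x1 = 5
  becomes
      x0 + 2*x1 - aux = 0,    with bounds aux in [5, 5],
  i.e. the scalar moves into the fixed bounds of a fresh auxiliary variable,
  so every equation ends up with scalar 0, and the auxiliary variable can
  later serve as a basic variable if the initial basis needs augmenting.
*/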
constraint->setStatistics( &_statistics ); - - // Assuming aux var is use, add the constraint's auxiliary variable assigned to it in the - // tableau, to the constraint - if ( _produceUNSATProofs ) - for ( unsigned var : constraint->getNativeAuxVars() ) - if ( _preprocessedQuery->_lastAddendToAux.exists( var ) ) - constraint->addTableauAuxVar( _preprocessedQuery->_lastAddendToAux.at( var ), - var ); - } - - _nlConstraints = _preprocessedQuery->getNonlinearConstraints(); - for ( const auto &constraint : _nlConstraints ) - { - constraint->registerAsWatcher( _tableau ); - constraint->setStatistics( &_statistics ); - } - - for ( unsigned i = 0; i < numberOfVariables; ++i ) - { - _tableau->setLowerBound( i, _preprocessedQuery->getLowerBound( i ) ); - _tableau->setUpperBound( i, _preprocessedQuery->getUpperBound( i ) ); - } - _boundManager.storeLocalBounds(); - - _statistics.setUnsignedAttribute( Statistics::NUM_PL_CONSTRAINTS, _plConstraints.size() ); -} - -void Engine::initializeNetworkLevelReasoning() -{ - _networkLevelReasoner = _preprocessedQuery->getNetworkLevelReasoner(); - - if ( _networkLevelReasoner ) - { - _networkLevelReasoner->computeSuccessorLayers(); - _networkLevelReasoner->setTableau( _tableau ); - if ( Options::get()->getBool( Options::DUMP_TOPOLOGY ) ) - { - _networkLevelReasoner->dumpTopology( false ); - std::cout << std::endl; - } - } -} - -bool Engine::processInputQuery( const IQuery &inputQuery, bool preprocess ) -{ - ENGINE_LOG( "processInputQuery starting\n" ); - struct timespec start = TimeUtils::sampleMicro(); - - try - { - invokePreprocessor( inputQuery, preprocess ); - if ( _verbosity > 1 ) - printInputBounds( inputQuery ); - initializeNetworkLevelReasoning(); - if ( preprocess ) - { - performSymbolicBoundTightening( &( *_preprocessedQuery ) ); - performSimulation(); - performMILPSolverBoundedTightening( &( *_preprocessedQuery ) ); - performAdditionalBackwardAnalysisIfNeeded(); - } - - if ( GlobalConfiguration::PL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING ) - for ( auto &plConstraint : _preprocessedQuery->getPiecewiseLinearConstraints() ) - plConstraint->addAuxiliaryEquationsAfterPreprocessing( *_preprocessedQuery ); - - if ( GlobalConfiguration::NL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING ) - for ( auto &nlConstraint : _preprocessedQuery->getNonlinearConstraints() ) - nlConstraint->addAuxiliaryEquationsAfterPreprocessing( *_preprocessedQuery ); - - if ( _produceUNSATProofs ) - { - for ( auto &plConstraint : _preprocessedQuery->getPiecewiseLinearConstraints() ) - { - if ( !UNSATCertificateUtils::getSupportedActivations().exists( - plConstraint->getType() ) ) - { - _produceUNSATProofs = false; - Options::get()->setBool( Options::PRODUCE_PROOFS, false ); - String activationType = - plConstraint->serializeToString().tokenize( "," ).back(); - printf( - "Turning off proof production since activation %s is not yet supported\n", - activationType.ascii() ); - break; - } - } - } - - if ( _lpSolverType == LPSolverType::NATIVE ) - { - double *constraintMatrix = createConstraintMatrix(); - removeRedundantEquations( constraintMatrix ); - - // The equations have changed, recreate the constraint matrix - delete[] constraintMatrix; - constraintMatrix = createConstraintMatrix(); - - List initialBasis; - List basicRows; - selectInitialVariablesForBasis( constraintMatrix, initialBasis, basicRows ); - addAuxiliaryVariables(); - augmentInitialBasisIfNeeded( initialBasis, basicRows ); - - storeEquationsInDegradationChecker(); - - // The equations have changed, recreate 
the constraint matrix - delete[] constraintMatrix; - constraintMatrix = createConstraintMatrix(); - - unsigned n = _preprocessedQuery->getNumberOfVariables(); - _boundManager.initialize( n ); - - initializeTableau( constraintMatrix, initialBasis ); - _boundManager.initializeBoundExplainer( n, _tableau->getM() ); - delete[] constraintMatrix; - - if ( _produceUNSATProofs ) - { - _UNSATCertificate = new UnsatCertificateNode( NULL, PiecewiseLinearCaseSplit() ); - _UNSATCertificateCurrentPointer->set( _UNSATCertificate ); - _UNSATCertificate->setVisited(); - _groundBoundManager.initialize( n ); - - for ( unsigned i = 0; i < n; ++i ) - { - _groundBoundManager.setUpperBound( i, _preprocessedQuery->getUpperBound( i ) ); - _groundBoundManager.setLowerBound( i, _preprocessedQuery->getLowerBound( i ) ); - } - } - } - else - { - ASSERT( _lpSolverType == LPSolverType::GUROBI ); - - ASSERT( GlobalConfiguration::USE_DEEPSOI_LOCAL_SEARCH == true ); - - if ( _verbosity > 0 ) - printf( "Using Gurobi to solve LP...\n" ); - - unsigned n = _preprocessedQuery->getNumberOfVariables(); - unsigned m = _preprocessedQuery->getEquations().size(); - // Only use BoundManager to store the bounds. - _boundManager.initialize( n ); - _tableau->setDimensions( m, n ); - initializeBoundsAndConstraintWatchersInTableau( n ); - } - - for ( const auto &constraint : _plConstraints ) - constraint->registerTableau( _tableau ); - for ( const auto &constraint : _nlConstraints ) - constraint->registerTableau( _tableau ); - - if ( _networkLevelReasoner && Options::get()->getBool( Options::DUMP_BOUNDS ) ) - _networkLevelReasoner->dumpBounds(); - - if ( GlobalConfiguration::USE_DEEPSOI_LOCAL_SEARCH ) - { - _soiManager = std::unique_ptr( - new SumOfInfeasibilitiesManager( *_preprocessedQuery, *_tableau ) ); - _soiManager->setStatistics( &_statistics ); - } - - if ( GlobalConfiguration::WARM_START ) - warmStart(); - - decideBranchingHeuristics(); - - struct timespec end = TimeUtils::sampleMicro(); - _statistics.setLongAttribute( Statistics::PREPROCESSING_TIME_MICRO, - TimeUtils::timePassed( start, end ) ); - - if ( !_tableau->allBoundsValid() ) - { - // Some variable bounds are invalid, so the query is unsat - throw InfeasibleQueryException(); - } - } - catch ( const InfeasibleQueryException & ) - { - ENGINE_LOG( "processInputQuery done\n" ); - - struct timespec end = TimeUtils::sampleMicro(); - _statistics.setLongAttribute( Statistics::PREPROCESSING_TIME_MICRO, - TimeUtils::timePassed( start, end ) ); - - _exitCode = Engine::UNSAT; - return false; - } - - ENGINE_LOG( "processInputQuery done\n" ); - - DEBUG( { - // Initially, all constraints should be active - for ( const auto &plc : _plConstraints ) - { - ASSERT( plc->isActive() ); - } - } ); - - _smtCore.storeDebuggingSolution( _preprocessedQuery->_debuggingSolution ); - return true; -} - -void Engine::performMILPSolverBoundedTightening( Query *inputQuery ) -{ - if ( _networkLevelReasoner && Options::get()->gurobiEnabled() ) - { - // Obtain from and store bounds into inputquery if it is not null. - if ( inputQuery ) - _networkLevelReasoner->obtainCurrentBounds( *inputQuery ); - else - _networkLevelReasoner->obtainCurrentBounds(); - - // TODO: Remove this block after getting ready to support sigmoid with MILP Bound - // Tightening. 
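
    /*
      A small numeric illustration (hypothetical values, not in the original
      code) of the guards used when storing tightenings into the input query
      below: bounds are only ever tightened. If x7 currently has bounds
      [0, 2] and the solver reports LB = 0.5 and UB = 3.0, only the lower
      bound is kept, [0, 2] -> [0.5, 2]; the reported upper bound 3.0 is
      weaker than the current one, fails the FloatUtils::lt test, and is
      discarded.
    */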
- if ( ( _milpSolverBoundTighteningType == MILPSolverBoundTighteningType::MILP_ENCODING || - _milpSolverBoundTighteningType == - MILPSolverBoundTighteningType::MILP_ENCODING_INCREMENTAL || - _milpSolverBoundTighteningType == - MILPSolverBoundTighteningType::ITERATIVE_PROPAGATION ) && - _preprocessedQuery->getNonlinearConstraints().size() > 0 ) - throw MarabouError( MarabouError::FEATURE_NOT_YET_SUPPORTED, - "Marabou doesn't support sigmoid with MILP Bound Tightening" ); - - switch ( _milpSolverBoundTighteningType ) - { - case MILPSolverBoundTighteningType::LP_RELAXATION: - case MILPSolverBoundTighteningType::LP_RELAXATION_INCREMENTAL: - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE: - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE: - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP: - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX: - _networkLevelReasoner->lpRelaxationPropagation(); - break; - case MILPSolverBoundTighteningType::MILP_ENCODING: - case MILPSolverBoundTighteningType::MILP_ENCODING_INCREMENTAL: - _networkLevelReasoner->MILPPropagation(); - break; - case MILPSolverBoundTighteningType::ITERATIVE_PROPAGATION: - _networkLevelReasoner->iterativePropagation(); - break; - case MILPSolverBoundTighteningType::NONE: - return; - } - List tightenings; - _networkLevelReasoner->getConstraintTightenings( tightenings ); - - - if ( inputQuery ) - { - for ( const auto &tightening : tightenings ) - { - if ( tightening._type == Tightening::LB && - FloatUtils::gt( tightening._value, - inputQuery->getLowerBound( tightening._variable ) ) ) - inputQuery->setLowerBound( tightening._variable, tightening._value ); - if ( tightening._type == Tightening::UB && - FloatUtils::lt( tightening._value, - inputQuery->getUpperBound( tightening._variable ) ) ) - inputQuery->setUpperBound( tightening._variable, tightening._value ); - } - } - else - { - for ( const auto &tightening : tightenings ) - { - if ( tightening._type == Tightening::LB ) - _tableau->tightenLowerBound( tightening._variable, tightening._value ); - - else if ( tightening._type == Tightening::UB ) - _tableau->tightenUpperBound( tightening._variable, tightening._value ); - } - } - } -} - -void Engine::performAdditionalBackwardAnalysisIfNeeded() -{ - if ( _milpSolverBoundTighteningType == MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE || - _milpSolverBoundTighteningType == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE ) - { - unsigned tightened = performSymbolicBoundTightening( &( *_preprocessedQuery ) ); - if ( _verbosity > 0 ) - printf( "Backward analysis tightened %u bounds\n", tightened ); - while ( tightened && - _milpSolverBoundTighteningType == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE && - GlobalConfiguration::MAX_ROUNDS_OF_BACKWARD_ANALYSIS ) - { - performMILPSolverBoundedTightening( &( *_preprocessedQuery ) ); - tightened = performSymbolicBoundTightening( &( *_preprocessedQuery ) ); - if ( _verbosity > 0 ) - printf( "Backward analysis tightened %u bounds\n", tightened ); - } - } -} - -void Engine::performMILPSolverBoundedTighteningForSingleLayer( unsigned targetIndex ) -{ - if ( _produceUNSATProofs ) - return; - - if ( _networkLevelReasoner && _isGurobyEnabled && _performLpTighteningAfterSplit && - _milpSolverBoundTighteningType != MILPSolverBoundTighteningType::NONE ) - { - _networkLevelReasoner->obtainCurrentBounds(); - _networkLevelReasoner->clearConstraintTightenings(); - - switch ( _milpSolverBoundTighteningType ) - { - case 
MILPSolverBoundTighteningType::LP_RELAXATION: - _networkLevelReasoner->LPTighteningForOneLayer( targetIndex ); - break; - case MILPSolverBoundTighteningType::LP_RELAXATION_INCREMENTAL: - return; - case MILPSolverBoundTighteningType::MILP_ENCODING: - _networkLevelReasoner->MILPTighteningForOneLayer( targetIndex ); - break; - case MILPSolverBoundTighteningType::MILP_ENCODING_INCREMENTAL: - return; - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE: - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE: - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP: - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX: - case MILPSolverBoundTighteningType::ITERATIVE_PROPAGATION: - case MILPSolverBoundTighteningType::NONE: - return; - } - List tightenings; - _networkLevelReasoner->getConstraintTightenings( tightenings ); - - for ( const auto &tightening : tightenings ) - { - if ( tightening._type == Tightening::LB ) - _tableau->tightenLowerBound( tightening._variable, tightening._value ); - - else if ( tightening._type == Tightening::UB ) - _tableau->tightenUpperBound( tightening._variable, tightening._value ); - } - } -} - -void Engine::extractSolution( IQuery &inputQuery, Preprocessor *preprocessor ) -{ - Preprocessor *preprocessorInUse = nullptr; - if ( preprocessor != nullptr ) - preprocessorInUse = preprocessor; - else if ( _preprocessingEnabled ) - preprocessorInUse = &_preprocessor; - - for ( unsigned i = 0; i < inputQuery.getNumberOfVariables(); ++i ) - { - if ( preprocessorInUse ) - { - // Symbolically fixed variables are skipped. They will be re-constructed in the end. - if ( preprocessorInUse->variableIsUnusedAndSymbolicallyFixed( i ) ) - continue; - - // Has the variable been merged into another? - unsigned variable = i; - while ( preprocessorInUse->variableIsMerged( variable ) ) - variable = preprocessorInUse->getMergedIndex( variable ); - - // Fixed variables are easy: return the value they've been fixed to. 
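
            /*
              An illustrative sketch (hypothetical indices, not from the
              original source) of the index-recovery chain used in this
              function. Suppose the preprocessor merged x3 into x1 and then
              eliminated x0, shifting the remaining indices down. The
              solution slot for original variable 3 is recovered as:

                  unsigned variable = 3;
                  while ( preprocessorInUse->variableIsMerged( variable ) )
                      variable = preprocessorInUse->getMergedIndex( variable );   // 3 -> 1
                  variable = preprocessorInUse->getNewIndex( variable );          // 1 -> 0
                  inputQuery.setSolutionValue( 3, _tableau->getValue( variable ) );
            */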
- if ( preprocessorInUse->variableIsFixed( variable ) ) - { - inputQuery.setSolutionValue( i, preprocessorInUse->getFixedValue( variable ) ); - continue; - } - - // We know which variable to look for, but it may have been assigned - // a new index, due to variable elimination - variable = preprocessorInUse->getNewIndex( variable ); - - // Finally, set the assigned value - inputQuery.setSolutionValue( i, _tableau->getValue( variable ) ); - } - else - { - inputQuery.setSolutionValue( i, _tableau->getValue( i ) ); - } - } - - if ( preprocessorInUse ) - { - /* - Recover the assignment of eliminated neurons (e.g., due to merging of WS layers) - */ - preprocessorInUse->setSolutionValuesOfEliminatedNeurons( inputQuery ); - } -} - -bool Engine::allVarsWithinBounds() const -{ - if ( _lpSolverType == LPSolverType::GUROBI ) - { - ASSERT( _gurobi ); - return _gurobi->haveFeasibleSolution(); - } - else - return !_tableau->existsBasicOutOfBounds(); -} - -void Engine::collectViolatedPlConstraints() -{ - _violatedPlConstraints.clear(); - for ( const auto &constraint : _plConstraints ) - { - if ( constraint->isActive() && !constraint->satisfied() ) - _violatedPlConstraints.append( constraint ); - } -} - -bool Engine::allPlConstraintsHold() -{ - return _violatedPlConstraints.empty(); -} - -bool Engine::allNonlinearConstraintsHold() -{ - for ( const auto &constraint : _nlConstraints ) - { - if ( !constraint->satisfied() ) - return false; - } - return true; -} - -bool Engine::hasBranchingCandidate() -{ - for ( const auto &constraint : _plConstraints ) - { - if ( constraint->isActive() && !constraint->phaseFixed() ) - return true; - } - return false; -} - -void Engine::selectViolatedPlConstraint() -{ - ASSERT( !_violatedPlConstraints.empty() ); - - _plConstraintToFix = _smtCore.chooseViolatedConstraintForFixing( _violatedPlConstraints ); - - ASSERT( _plConstraintToFix ); -} - -void Engine::reportPlViolation() -{ - _smtCore.reportViolatedConstraint( _plConstraintToFix ); -} - -void Engine::storeState( EngineState &state, TableauStateStorageLevel level ) const -{ - _tableau->storeState( state._tableauState, level ); - state._tableauStateStorageLevel = level; - - for ( const auto &constraint : _plConstraints ) - state._plConstraintToState[constraint] = constraint->duplicateConstraint(); - - state._numPlConstraintsDisabledByValidSplits = _numPlConstraintsDisabledByValidSplits; -} - -void Engine::restoreState( const EngineState &state ) -{ - ENGINE_LOG( "Restore state starting" ); - - if ( state._tableauStateStorageLevel == TableauStateStorageLevel::STORE_NONE ) - throw MarabouError( MarabouError::RESTORING_ENGINE_FROM_INVALID_STATE ); - - ENGINE_LOG( "\tRestoring tableau state" ); - _tableau->restoreState( state._tableauState, state._tableauStateStorageLevel ); - - ENGINE_LOG( "\tRestoring constraint states" ); - for ( auto &constraint : _plConstraints ) - { - if ( !state._plConstraintToState.exists( constraint ) ) - throw MarabouError( MarabouError::MISSING_PL_CONSTRAINT_STATE ); - - constraint->restoreState( state._plConstraintToState[constraint] ); - } - - _numPlConstraintsDisabledByValidSplits = state._numPlConstraintsDisabledByValidSplits; - - if ( _lpSolverType == LPSolverType::NATIVE ) - { - // Make sure the data structures are initialized to the correct size - _rowBoundTightener->setDimensions(); - adjustWorkMemorySize(); - _activeEntryStrategy->resizeHook( _tableau ); - _costFunctionManager->initialize(); - } - - // Reset the violation counts in the SMT core - _smtCore.resetSplitConditions(); -} - -void 
Engine::setNumPlConstraintsDisabledByValidSplits( unsigned numConstraints ) -{ - _numPlConstraintsDisabledByValidSplits = numConstraints; -} - -bool Engine::attemptToMergeVariables( unsigned x1, unsigned x2 ) -{ - /* - First, we need to ensure that the variables are both non-basic. - */ - - unsigned n = _tableau->getN(); - unsigned m = _tableau->getM(); - - if ( _tableau->isBasic( x1 ) ) - { - TableauRow x1Row( n - m ); - _tableau->getTableauRow( _tableau->variableToIndex( x1 ), &x1Row ); - - bool found = false; - double bestCoefficient = 0.0; - unsigned nonBasic = 0; - for ( unsigned i = 0; i < n - m; ++i ) - { - if ( x1Row._row[i]._var != x2 ) - { - double contender = FloatUtils::abs( x1Row._row[i]._coefficient ); - if ( FloatUtils::gt( contender, bestCoefficient ) ) - { - found = true; - nonBasic = x1Row._row[i]._var; - bestCoefficient = contender; - } - } - } - - if ( !found ) - return false; - - _tableau->setEnteringVariableIndex( _tableau->variableToIndex( nonBasic ) ); - _tableau->setLeavingVariableIndex( _tableau->variableToIndex( x1 ) ); - - // Make sure the change column and pivot row are up-to-date - strategies - // such as projected steepest edge need these for their internal updates. - _tableau->computeChangeColumn(); - _tableau->computePivotRow(); - - _activeEntryStrategy->prePivotHook( _tableau, false ); - _tableau->performDegeneratePivot(); - _activeEntryStrategy->postPivotHook( _tableau, false ); - } - - if ( _tableau->isBasic( x2 ) ) - { - TableauRow x2Row( n - m ); - _tableau->getTableauRow( _tableau->variableToIndex( x2 ), &x2Row ); - - bool found = false; - double bestCoefficient = 0.0; - unsigned nonBasic = 0; - for ( unsigned i = 0; i < n - m; ++i ) - { - if ( x2Row._row[i]._var != x1 ) - { - double contender = FloatUtils::abs( x2Row._row[i]._coefficient ); - if ( FloatUtils::gt( contender, bestCoefficient ) ) - { - found = true; - nonBasic = x2Row._row[i]._var; - bestCoefficient = contender; - } - } - } - - if ( !found ) - return false; - - _tableau->setEnteringVariableIndex( _tableau->variableToIndex( nonBasic ) ); - _tableau->setLeavingVariableIndex( _tableau->variableToIndex( x2 ) ); - - // Make sure the change column and pivot row are up-to-date - strategies - // such as projected steepest edge need these for their internal updates. - _tableau->computeChangeColumn(); - _tableau->computePivotRow(); - - _activeEntryStrategy->prePivotHook( _tableau, false ); - _tableau->performDegeneratePivot(); - _activeEntryStrategy->postPivotHook( _tableau, false ); - } - - // Both variables are now non-basic, so we can merge their columns - _tableau->mergeColumns( x1, x2 ); - DEBUG( _tableau->verifyInvariants() ); - - // Reset the entry strategy - _activeEntryStrategy->initialize( _tableau ); - - return true; -} - -void Engine::applySplit( const PiecewiseLinearCaseSplit &split ) -{ - ENGINE_LOG( "" ); - ENGINE_LOG( "Applying a split. " ); - - DEBUG( _tableau->verifyInvariants() ); - - List bounds = split.getBoundTightenings(); - List equations = split.getEquations(); - - // We assume that case splits only apply new bounds but do not apply - // new equations. This can always be made possible. - if ( _lpSolverType != LPSolverType::NATIVE && equations.size() > 0 ) - throw MarabouError( MarabouError::FEATURE_NOT_YET_SUPPORTED, - "Can only update bounds when using non-native" - "simplex engine!" ); - - for ( auto &equation : equations ) - { - /* - First, adjust the equation if any variables have been merged. 
- E.g., if the equation is x1 + x2 + x3 = 0, and x1 and x2 have been - merged, the equation becomes 2x1 + x3 = 0 - */ - for ( auto &addend : equation._addends ) - addend._variable = _tableau->getVariableAfterMerging( addend._variable ); - - List::iterator addend; - List::iterator otherAddend; - - addend = equation._addends.begin(); - while ( addend != equation._addends.end() ) - { - otherAddend = addend; - ++otherAddend; - - while ( otherAddend != equation._addends.end() ) - { - if ( otherAddend->_variable == addend->_variable ) - { - addend->_coefficient += otherAddend->_coefficient; - otherAddend = equation._addends.erase( otherAddend ); - } - else - ++otherAddend; - } - - if ( FloatUtils::isZero( addend->_coefficient ) ) - addend = equation._addends.erase( addend ); - else - ++addend; - } - - /* - In the general case, we just add the new equation to the tableau. - However, we also support a very common case: equations of the form - x1 = x2, which are common, e.g., with ReLUs. For these equations we - may be able to merge two columns of the tableau. - */ - unsigned x1, x2; - bool canMergeColumns = - // Only if the flag is on - GlobalConfiguration::USE_COLUMN_MERGING_EQUATIONS && - // Only if the equation has the correct form - equation.isVariableMergingEquation( x1, x2 ) && - // And only if the variables are not out of bounds - ( !_tableau->isBasic( x1 ) || - !_tableau->basicOutOfBounds( _tableau->variableToIndex( x1 ) ) ) && - ( !_tableau->isBasic( x2 ) || - !_tableau->basicOutOfBounds( _tableau->variableToIndex( x2 ) ) ); - - bool columnsSuccessfullyMerged = false; - if ( canMergeColumns ) - columnsSuccessfullyMerged = attemptToMergeVariables( x1, x2 ); - - if ( !columnsSuccessfullyMerged ) - { - // General case: add a new equation to the tableau - unsigned auxVariable = _tableau->addEquation( equation ); - _activeEntryStrategy->resizeHook( _tableau ); - - switch ( equation._type ) - { - case Equation::GE: - bounds.append( Tightening( auxVariable, 0.0, Tightening::UB ) ); - break; - - case Equation::LE: - bounds.append( Tightening( auxVariable, 0.0, Tightening::LB ) ); - break; - - case Equation::EQ: - bounds.append( Tightening( auxVariable, 0.0, Tightening::LB ) ); - bounds.append( Tightening( auxVariable, 0.0, Tightening::UB ) ); - break; - - default: - ASSERT( false ); - break; - } - } - } - - if ( _lpSolverType == LPSolverType::NATIVE ) - { - adjustWorkMemorySize(); - } - - for ( auto &bound : bounds ) - { - unsigned variable = _tableau->getVariableAfterMerging( bound._variable ); - - if ( bound._type == Tightening::LB ) - { - ENGINE_LOG( - Stringf( "x%u: lower bound set to %.3lf", variable, bound._value ).ascii() ); - if ( _produceUNSATProofs && - FloatUtils::gt( bound._value, _boundManager.getLowerBound( bound._variable ) ) ) - { - _boundManager.resetExplanation( variable, Tightening::LB ); - updateGroundLowerBound( variable, bound._value ); - _boundManager.tightenLowerBound( variable, bound._value ); - } - else if ( !_produceUNSATProofs ) - _boundManager.tightenLowerBound( variable, bound._value ); - } - else - { - ENGINE_LOG( - Stringf( "x%u: upper bound set to %.3lf", variable, bound._value ).ascii() ); - if ( _produceUNSATProofs && - FloatUtils::lt( bound._value, _boundManager.getUpperBound( bound._variable ) ) ) - { - _boundManager.resetExplanation( variable, Tightening::UB ); - updateGroundUpperBound( variable, bound._value ); - _boundManager.tightenUpperBound( variable, bound._value ); - } - else if ( !_produceUNSATProofs ) - _boundManager.tightenUpperBound( variable, 
bound._value ); - } - } - - if ( _produceUNSATProofs && _UNSATCertificateCurrentPointer ) - ( **_UNSATCertificateCurrentPointer ).setVisited(); - - DEBUG( _tableau->verifyInvariants() ); - ENGINE_LOG( "Done with split\n" ); -} - -void Engine::applyBoundTightenings() -{ - List tightenings; - _boundManager.getTightenings( tightenings ); - - for ( const auto &tightening : tightenings ) - { - if ( tightening._type == Tightening::LB ) - _tableau->tightenLowerBound( tightening._variable, tightening._value ); - else - _tableau->tightenUpperBound( tightening._variable, tightening._value ); - } -} - -void Engine::applyAllRowTightenings() -{ - applyBoundTightenings(); -} - -void Engine::applyAllConstraintTightenings() -{ - applyBoundTightenings(); -} - -void Engine::applyAllBoundTightenings() -{ - struct timespec start = TimeUtils::sampleMicro(); - - if ( _lpSolverType == LPSolverType::NATIVE ) - applyAllRowTightenings(); - applyAllConstraintTightenings(); - - struct timespec end = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TOTAL_TIME_APPLYING_STORED_TIGHTENINGS_MICRO, - TimeUtils::timePassed( start, end ) ); -} - -bool Engine::applyAllValidConstraintCaseSplits() -{ - struct timespec start = TimeUtils::sampleMicro(); - - bool appliedSplit = false; - for ( auto &constraint : _plConstraints ) - if ( applyValidConstraintCaseSplit( constraint ) ) - appliedSplit = true; - - struct timespec end = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TOTAL_TIME_PERFORMING_VALID_CASE_SPLITS_MICRO, - TimeUtils::timePassed( start, end ) ); - - return appliedSplit; -} - -bool Engine::applyValidConstraintCaseSplit( PiecewiseLinearConstraint *constraint ) -{ - if ( constraint->isActive() && constraint->phaseFixed() ) - { - String constraintString; - constraint->dump( constraintString ); - ENGINE_LOG( Stringf( "A constraint has become valid. 
Dumping constraint: %s", - constraintString.ascii() ) - .ascii() ); - - constraint->setActiveConstraint( false ); - PiecewiseLinearCaseSplit validSplit = constraint->getValidCaseSplit(); - _smtCore.recordImpliedValidSplit( validSplit ); - applySplit( validSplit ); - - if ( _soiManager ) - _soiManager->removeCostComponentFromHeuristicCost( constraint ); - ++_numPlConstraintsDisabledByValidSplits; - - return true; - } - - return false; -} - -bool Engine::shouldCheckDegradation() -{ - return _statistics.getLongAttribute( Statistics::NUM_MAIN_LOOP_ITERATIONS ) % - GlobalConfiguration::DEGRADATION_CHECKING_FREQUENCY == - 0; -} - -bool Engine::highDegradation() -{ - struct timespec start = TimeUtils::sampleMicro(); - - double degradation = _degradationChecker.computeDegradation( *_tableau ); - _statistics.setDoubleAttribute( Statistics::CURRENT_DEGRADATION, degradation ); - if ( FloatUtils::gt( degradation, - _statistics.getDoubleAttribute( Statistics::MAX_DEGRADATION ) ) ) - _statistics.setDoubleAttribute( Statistics::MAX_DEGRADATION, degradation ); - - bool result = FloatUtils::gt( degradation, GlobalConfiguration::DEGRADATION_THRESHOLD ); - - struct timespec end = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TOTAL_TIME_DEGRADATION_CHECKING, - TimeUtils::timePassed( start, end ) ); - - // Debug - if ( result ) - printf( "High degradation found!\n" ); - // - - return result; -} - -void Engine::tightenBoundsOnConstraintMatrix() -{ - struct timespec start = TimeUtils::sampleMicro(); - - if ( _statistics.getLongAttribute( Statistics::NUM_MAIN_LOOP_ITERATIONS ) % - GlobalConfiguration::BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY == - 0 ) - { - _rowBoundTightener->examineConstraintMatrix( true ); - _statistics.incLongAttribute( Statistics::NUM_BOUND_TIGHTENINGS_ON_CONSTRAINT_MATRIX ); - } - - struct timespec end = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TOTAL_TIME_CONSTRAINT_MATRIX_BOUND_TIGHTENING_MICRO, - TimeUtils::timePassed( start, end ) ); -} - -void Engine::explicitBasisBoundTightening() -{ - struct timespec start = TimeUtils::sampleMicro(); - - bool saturation = GlobalConfiguration::EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION; - - _statistics.incLongAttribute( Statistics::NUM_BOUND_TIGHTENINGS_ON_EXPLICIT_BASIS ); - - switch ( GlobalConfiguration::EXPLICIT_BASIS_BOUND_TIGHTENING_TYPE ) - { - case GlobalConfiguration::COMPUTE_INVERTED_BASIS_MATRIX: - _rowBoundTightener->examineInvertedBasisMatrix( saturation ); - break; - - case GlobalConfiguration::USE_IMPLICIT_INVERTED_BASIS_MATRIX: - _rowBoundTightener->examineImplicitInvertedBasisMatrix( saturation ); - break; - - case GlobalConfiguration::DISABLE_EXPLICIT_BASIS_TIGHTENING: - break; - } - - struct timespec end = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TOTAL_TIME_EXPLICIT_BASIS_BOUND_TIGHTENING_MICRO, - TimeUtils::timePassed( start, end ) ); -} - -void Engine::performPrecisionRestoration( PrecisionRestorer::RestoreBasics restoreBasics ) -{ - struct timespec start = TimeUtils::sampleMicro(); - - // debug - double before = _degradationChecker.computeDegradation( *_tableau ); - // - - _precisionRestorer.restorePrecision( *this, *_tableau, _smtCore, restoreBasics ); - struct timespec end = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TOTAL_TIME_PRECISION_RESTORATION, - TimeUtils::timePassed( start, end ) ); - - _statistics.incUnsignedAttribute( Statistics::NUM_PRECISION_RESTORATIONS ); - - // debug - double after = 
_degradationChecker.computeDegradation( *_tableau );
-    if ( _verbosity > 0 )
-        printf( "Performing precision restoration. Degradation before: %.15lf. After: %.15lf\n",
-                before,
-                after );
-    //
-
-    if ( highDegradation() && ( restoreBasics == PrecisionRestorer::RESTORE_BASICS ) )
-    {
-        // First round, with basic restoration, still resulted in high degradation.
-        // Try again!
-        start = TimeUtils::sampleMicro();
-        _precisionRestorer.restorePrecision(
-            *this, *_tableau, _smtCore, PrecisionRestorer::DO_NOT_RESTORE_BASICS );
-        end = TimeUtils::sampleMicro();
-        _statistics.incLongAttribute( Statistics::TOTAL_TIME_PRECISION_RESTORATION,
-                                      TimeUtils::timePassed( start, end ) );
-        _statistics.incUnsignedAttribute( Statistics::NUM_PRECISION_RESTORATIONS );
-
-        // debug
-        double afterSecond = _degradationChecker.computeDegradation( *_tableau );
-        if ( _verbosity > 0 )
-            printf(
-                "Performing 2nd precision restoration. Degradation before: %.15lf. After: %.15lf\n",
-                after,
-                afterSecond );
-
-        if ( highDegradation() )
-            throw MarabouError( MarabouError::RESTORATION_FAILED_TO_RESTORE_PRECISION );
-    }
-}
-
-void Engine::storeInitialEngineState()
-{
-    if ( !_initialStateStored )
-    {
-        _precisionRestorer.storeInitialEngineState( *this );
-        _initialStateStored = true;
-    }
-}
-
-bool Engine::basisRestorationNeeded() const
-{
-    return _basisRestorationRequired == Engine::STRONG_RESTORATION_NEEDED ||
-           _basisRestorationRequired == Engine::WEAK_RESTORATION_NEEDED;
-}
-
-const Statistics *Engine::getStatistics() const
-{
-    return &_statistics;
-}
-
-Query *Engine::getQuery()
-{
-    return &( *_preprocessedQuery );
-}
-
-void Engine::checkBoundCompliancyWithDebugSolution()
-{
-    if ( _smtCore.checkSkewFromDebuggingSolution() )
-    {
-        // The stack is compliant, so we should not have learned any non-compliant bounds
-        for ( const auto &var : _preprocessedQuery->_debuggingSolution )
-        {
-            // printf( "Looking at var %u\n", var.first );
-
-            if ( FloatUtils::gt( _tableau->getLowerBound( var.first ), var.second, 1e-5 ) )
-            {
-                printf( "Error! The stack is compliant, but learned a non-compliant bound: "
-                        "Solution for x%u is %.15lf, but learned lower bound %.15lf\n",
-                        var.first,
-                        var.second,
-                        _tableau->getLowerBound( var.first ) );
-
-                throw MarabouError( MarabouError::DEBUGGING_ERROR );
-            }
-
-            if ( FloatUtils::lt( _tableau->getUpperBound( var.first ), var.second, 1e-5 ) )
-            {
-                printf( "Error! The stack is compliant, but learned a non-compliant bound: "
-                        "Solution for x%u is %.15lf, but learned upper bound %.15lf\n",
-                        var.first,
-                        var.second,
-                        _tableau->getUpperBound( var.first ) );
-
-                throw MarabouError( MarabouError::DEBUGGING_ERROR );
-            }
-        }
-    }
-}
-
-void Engine::quitSignal()
-{
-    _quitRequested = true;
-}
-
-Engine::ExitCode Engine::getExitCode() const
-{
-    return _exitCode;
-}
-
-std::atomic_bool *Engine::getQuitRequested()
-{
-    return &_quitRequested;
-}
-
-List<unsigned> Engine::getInputVariables() const
-{
-    return _preprocessedQuery->getInputVariables();
-}
-
-void Engine::performSimulation()
-{
-    if ( _simulationSize == 0 || !_networkLevelReasoner ||
-         _milpSolverBoundTighteningType == MILPSolverBoundTighteningType::NONE ||
-         _produceUNSATProofs )
-    {
-        ENGINE_LOG( Stringf( "Skip simulation..." ).ascii() );
-        return;
-    }
-
-    // Outer vector: one entry per input neuron; inner vector: one value per simulation
-    Vector<Vector<double>> simulations;
-
-    std::mt19937 mt( GlobalConfiguration::SIMULATION_RANDOM_SEED );
-
-    for ( unsigned i = 0; i < _networkLevelReasoner->getLayer( 0 )->getSize(); ++i )
-    {
-        std::uniform_real_distribution<double> distribution(
-            _networkLevelReasoner->getLayer( 0 )->getLb( i ),
-            _networkLevelReasoner->getLayer( 0 )->getUb( i ) );
-        Vector<double> simulationInput( _simulationSize );
-
-        for ( unsigned j = 0; j < _simulationSize; ++j )
-            simulationInput[j] = distribution( mt );
-        simulations.append( simulationInput );
-    }
-    _networkLevelReasoner->simulate( &simulations );
-}
-
-unsigned Engine::performSymbolicBoundTightening( Query *inputQuery )
-{
-    if ( _symbolicBoundTighteningType == SymbolicBoundTighteningType::NONE ||
-         ( !_networkLevelReasoner ) || _produceUNSATProofs )
-        return 0;
-
-    struct timespec start = TimeUtils::sampleMicro();
-
-    unsigned numTightenedBounds = 0;
-
-    // Step 1: tell the NLR about the current bounds
-    if ( inputQuery )
-    {
-        // Obtain bounds from, and store bounds into, the input query if it is not null.
-        _networkLevelReasoner->obtainCurrentBounds( *inputQuery );
-    }
-    else
-    {
-        // Get bounds from the Tableau.
-        _networkLevelReasoner->obtainCurrentBounds();
-    }
-
-    // Step 2: perform SBT
-    if ( _symbolicBoundTighteningType == SymbolicBoundTighteningType::SYMBOLIC_BOUND_TIGHTENING )
-        _networkLevelReasoner->symbolicBoundPropagation();
-    else if ( _symbolicBoundTighteningType == SymbolicBoundTighteningType::DEEP_POLY )
-        _networkLevelReasoner->deepPolyPropagation();
-
-    // Step 3: Extract the bounds
-    List<Tightening> tightenings;
-    _networkLevelReasoner->getConstraintTightenings( tightenings );
-
-    if ( inputQuery )
-    {
-        for ( const auto &tightening : tightenings )
-        {
-            if ( tightening._type == Tightening::LB &&
-                 FloatUtils::gt( tightening._value,
-                                 inputQuery->getLowerBound( tightening._variable ) ) )
-            {
-                inputQuery->setLowerBound( tightening._variable, tightening._value );
-                ++numTightenedBounds;
-            }
-
-            if ( tightening._type == Tightening::UB &&
-                 FloatUtils::lt( tightening._value,
-                                 inputQuery->getUpperBound( tightening._variable ) ) )
-            {
-                inputQuery->setUpperBound( tightening._variable, tightening._value );
-                ++numTightenedBounds;
-            }
-        }
-    }
-    else
-    {
-        for ( const auto &tightening : tightenings )
-        {
-            if ( tightening._type == Tightening::LB &&
-                 FloatUtils::gt( tightening._value,
-                                 _tableau->getLowerBound( tightening._variable ) ) )
-            {
-                _tableau->tightenLowerBound( tightening._variable, tightening._value );
-                ++numTightenedBounds;
-            }
-
-            if ( tightening._type == Tightening::UB &&
-                 FloatUtils::lt( tightening._value,
-                                 _tableau->getUpperBound( tightening._variable ) ) )
-            {
-                _tableau->tightenUpperBound( tightening._variable, tightening._value );
-                ++numTightenedBounds;
-            }
-        }
-    }
-
-    struct timespec end = TimeUtils::sampleMicro();
-    _statistics.incLongAttribute( Statistics::TOTAL_TIME_PERFORMING_SYMBOLIC_BOUND_TIGHTENING,
-                                  TimeUtils::timePassed( start, end ) );
-    _statistics.incLongAttribute( Statistics::NUM_TIGHTENINGS_FROM_SYMBOLIC_BOUND_TIGHTENING,
-                                  numTightenedBounds );
-    return numTightenedBounds;
-}
-
-bool Engine::shouldExitDueToTimeout( double timeout ) const
-{
-    // A timeout value of 0 means no time limit
-    if ( timeout == 0 )
-        return false;
-
-    return static_cast<double>( _statistics.getTotalTimeInMicro() ) / MICROSECONDS_TO_SECONDS >
-           timeout;
-}
-
-void Engine::preContextPushHook()
-{
-    struct timespec start = TimeUtils::sampleMicro();
-
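
/*
  An illustrative sketch (hypothetical values, not from the original source)
  of the layout built in performSimulation() above: with two input neurons
  and _simulationSize == 3,

      simulations = { { 0.1, 0.7, 0.3 },      // samples for input neuron 0
                      { -1.0, 0.2, 0.9 } };   // samples for input neuron 1

  one inner Vector<double> per input neuron, each entry drawn uniformly
  from that neuron's current [lb, ub].
*/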
_boundManager.storeLocalBounds(); - if ( _produceUNSATProofs ) - _groundBoundManager.storeLocalBounds(); - struct timespec end = TimeUtils::sampleMicro(); - - _statistics.incLongAttribute( Statistics::TIME_CONTEXT_PUSH_HOOK, - TimeUtils::timePassed( start, end ) ); -} - -void Engine::postContextPopHook() -{ - struct timespec start = TimeUtils::sampleMicro(); - - _boundManager.restoreLocalBounds(); - if ( _produceUNSATProofs ) - _groundBoundManager.restoreLocalBounds(); - _tableau->postContextPopHook(); - - struct timespec end = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TIME_CONTEXT_POP_HOOK, - TimeUtils::timePassed( start, end ) ); -} - -void Engine::reset() -{ - resetStatistics(); - _sncMode = false; - clearViolatedPLConstraints(); - resetSmtCore(); - resetBoundTighteners(); - resetExitCode(); -} - -void Engine::resetStatistics() -{ - Statistics statistics; - _statistics = statistics; - _smtCore.setStatistics( &_statistics ); - _tableau->setStatistics( &_statistics ); - _rowBoundTightener->setStatistics( &_statistics ); - _preprocessor.setStatistics( &_statistics ); - _activeEntryStrategy->setStatistics( &_statistics ); - - _statistics.stampStartingTime(); -} - -void Engine::clearViolatedPLConstraints() -{ - _violatedPlConstraints.clear(); - _plConstraintToFix = NULL; -} - -void Engine::resetSmtCore() -{ - _smtCore.reset(); - _smtCore.initializeScoreTrackerIfNeeded( _plConstraints ); -} - -void Engine::resetExitCode() -{ - _exitCode = Engine::NOT_DONE; -} - -void Engine::resetBoundTighteners() -{ -} - -void Engine::warmStart() -{ - // An NLR is required for a warm start - if ( !_networkLevelReasoner ) - return; - - // First, choose an arbitrary assignment for the input variables - unsigned numInputVariables = _preprocessedQuery->getNumInputVariables(); - unsigned numOutputVariables = _preprocessedQuery->getNumOutputVariables(); - - if ( numInputVariables == 0 ) - { - // Trivial case: all inputs are fixed, nothing to evaluate - return; - } - - double *inputAssignment = new double[numInputVariables]; - double *outputAssignment = new double[numOutputVariables]; - - for ( unsigned i = 0; i < numInputVariables; ++i ) - { - unsigned variable = _preprocessedQuery->inputVariableByIndex( i ); - inputAssignment[i] = _tableau->getLowerBound( variable ); - } - - // Evaluate the network for this assignment - _networkLevelReasoner->evaluate( inputAssignment, outputAssignment ); - - // Try to update as many variables as possible to match their assignment - for ( unsigned i = 0; i < _networkLevelReasoner->getNumberOfLayers(); ++i ) - { - const NLR::Layer *layer = _networkLevelReasoner->getLayer( i ); - unsigned layerSize = layer->getSize(); - const double *assignment = layer->getAssignment(); - - for ( unsigned j = 0; j < layerSize; ++j ) - { - if ( layer->neuronHasVariable( j ) ) - { - unsigned variable = layer->neuronToVariable( j ); - if ( !_tableau->isBasic( variable ) ) - _tableau->setNonBasicAssignment( variable, assignment[j], false ); - } - } - } - - // We did what we could for the non-basics; now let the tableau compute - // the basic assignment - _tableau->computeAssignment(); - - delete[] outputAssignment; - delete[] inputAssignment; -} - -void Engine::checkOverallProgress() -{ - // Get fresh statistics - unsigned numVisitedStates = - _statistics.getUnsignedAttribute( Statistics::NUM_VISITED_TREE_STATES ); - unsigned long long currentIteration = - _statistics.getLongAttribute( Statistics::NUM_MAIN_LOOP_ITERATIONS ); - - if ( numVisitedStates > _lastNumVisitedStates 
)
-    {
-        // Progress has been made
-        _lastNumVisitedStates = numVisitedStates;
-        _lastIterationWithProgress = currentIteration;
-    }
-    else
-    {
-        // No progress has been made. If it's been too long, request a restoration
-        if ( currentIteration >
-             _lastIterationWithProgress + GlobalConfiguration::MAX_ITERATIONS_WITHOUT_PROGRESS )
-        {
-            ENGINE_LOG(
-                "checkOverallProgress detected cycling. Requesting a precision restoration" );
-            _basisRestorationRequired = Engine::STRONG_RESTORATION_NEEDED;
-            _lastIterationWithProgress = currentIteration;
-        }
-    }
-}
-
-void Engine::updateDirections()
-{
-    if ( GlobalConfiguration::USE_POLARITY_BASED_DIRECTION_HEURISTICS )
-        for ( const auto &constraint : _plConstraints )
-            if ( constraint->supportPolarity() && constraint->isActive() &&
-                 !constraint->phaseFixed() )
-                constraint->updateDirection();
-}
-
-void Engine::decideBranchingHeuristics()
-{
-    DivideStrategy divideStrategy = Options::get()->getDivideStrategy();
-    if ( divideStrategy == DivideStrategy::Auto )
-    {
-        if ( !_produceUNSATProofs && !_preprocessedQuery->getInputVariables().empty() &&
-             _preprocessedQuery->getInputVariables().size() <
-                 GlobalConfiguration::INTERVAL_SPLITTING_THRESHOLD )
-        {
-            // NOTE: the benefit of input splitting is minimal with abstract interpretation
-            // disabled. Therefore, since proof production mode does not currently support
-            // abstract interpretation, we do not perform input splitting in that mode.
-            divideStrategy = DivideStrategy::LargestInterval;
-            if ( _verbosity >= 2 )
-                printf( "Branching heuristics set to LargestInterval\n" );
-        }
-        else
-        {
-            if ( GlobalConfiguration::USE_DEEPSOI_LOCAL_SEARCH )
-            {
-                divideStrategy = DivideStrategy::PseudoImpact;
-                if ( _verbosity >= 2 )
-                    printf( "Branching heuristics set to PseudoImpact\n" );
-            }
-            else
-            {
-                divideStrategy = DivideStrategy::ReLUViolation;
-                if ( _verbosity >= 2 )
-                    printf( "Branching heuristics set to ReLUViolation\n" );
-            }
-        }
-    }
-    ASSERT( divideStrategy != DivideStrategy::Auto );
-    _smtCore.setBranchingHeuristics( divideStrategy );
-    _smtCore.initializeScoreTrackerIfNeeded( _plConstraints );
-}
-
-PiecewiseLinearConstraint *Engine::pickSplitPLConstraintBasedOnPolarity()
-{
-    ENGINE_LOG( Stringf( "Using Polarity-based heuristics..." ).ascii() );
-
-    if ( !_networkLevelReasoner )
-        return NULL;
-
-    List<PiecewiseLinearConstraint *> constraints =
-        _networkLevelReasoner->getConstraintsInTopologicalOrder();
-
-    Map<double, PiecewiseLinearConstraint *> scoreToConstraint;
-    for ( auto &plConstraint : constraints )
-    {
-        if ( plConstraint->supportPolarity() && plConstraint->isActive() &&
-             !plConstraint->phaseFixed() )
-        {
-            plConstraint->updateScoreBasedOnPolarity();
-            scoreToConstraint[plConstraint->getScore()] = plConstraint;
-            if ( scoreToConstraint.size() >= GlobalConfiguration::POLARITY_CANDIDATES_THRESHOLD )
-                break;
-        }
-    }
-    if ( scoreToConstraint.size() > 0 )
-    {
-        ENGINE_LOG( Stringf( "Score of the picked ReLU: %f", ( *scoreToConstraint.begin() ).first )
-                        .ascii() );
-        return ( *scoreToConstraint.begin() ).second;
-    }
-    else
-        return NULL;
-}
-
-PiecewiseLinearConstraint *Engine::pickSplitPLConstraintBasedOnTopology()
-{
-    // We push the first unfixed ReLU in the topological order to the _candidatePlConstraints
-    ENGINE_LOG( Stringf( "Using EarliestReLU heuristics..."
).ascii() ); - - if ( !_networkLevelReasoner ) - throw MarabouError( MarabouError::NETWORK_LEVEL_REASONER_NOT_AVAILABLE ); - - List constraints = - _networkLevelReasoner->getConstraintsInTopologicalOrder(); - - for ( auto &plConstraint : constraints ) - { - if ( plConstraint->isActive() && !plConstraint->phaseFixed() ) - return plConstraint; - } - return NULL; -} - -PiecewiseLinearConstraint *Engine::pickSplitPLConstraintBasedOnIntervalWidth() -{ - // We push the first unfixed ReLU in the topology order to the _candidatePlConstraints - ENGINE_LOG( Stringf( "Using LargestInterval heuristics..." ).ascii() ); - - unsigned inputVariableWithLargestInterval = 0; - double largestIntervalSoFar = 0; - for ( const auto &variable : _preprocessedQuery->getInputVariables() ) - { - double interval = _tableau->getUpperBound( variable ) - _tableau->getLowerBound( variable ); - if ( interval > largestIntervalSoFar ) - { - inputVariableWithLargestInterval = variable; - largestIntervalSoFar = interval; - } - } - - if ( largestIntervalSoFar == 0 ) - return NULL; - else - { - double mid = ( _tableau->getLowerBound( inputVariableWithLargestInterval ) + - _tableau->getUpperBound( inputVariableWithLargestInterval ) ) / - 2; - PiecewiseLinearCaseSplit s1; - s1.storeBoundTightening( - Tightening( inputVariableWithLargestInterval, mid, Tightening::UB ) ); - PiecewiseLinearCaseSplit s2; - s2.storeBoundTightening( - Tightening( inputVariableWithLargestInterval, mid, Tightening::LB ) ); - - List splits; - splits.append( s1 ); - splits.append( s2 ); - _disjunctionForSplitting = - std::unique_ptr( new DisjunctionConstraint( splits ) ); - return _disjunctionForSplitting.get(); - } -} - -PiecewiseLinearConstraint *Engine::pickSplitPLConstraint( DivideStrategy strategy ) -{ - ENGINE_LOG( Stringf( "Picking a split PLConstraint..." ).ascii() ); - - PiecewiseLinearConstraint *candidatePLConstraint = NULL; - if ( strategy == DivideStrategy::PseudoImpact ) - { - if ( _smtCore.getStackDepth() > 3 ) - candidatePLConstraint = _smtCore.getConstraintsWithHighestScore(); - else if ( !_preprocessedQuery->getInputVariables().empty() && - _preprocessedQuery->getInputVariables().size() < - GlobalConfiguration::INTERVAL_SPLITTING_THRESHOLD ) - candidatePLConstraint = pickSplitPLConstraintBasedOnIntervalWidth(); - else - { - candidatePLConstraint = pickSplitPLConstraintBasedOnPolarity(); - if ( candidatePLConstraint == NULL ) - candidatePLConstraint = _smtCore.getConstraintsWithHighestScore(); - } - } - else if ( strategy == DivideStrategy::Polarity ) - candidatePLConstraint = pickSplitPLConstraintBasedOnPolarity(); - else if ( strategy == DivideStrategy::EarliestReLU ) - candidatePLConstraint = pickSplitPLConstraintBasedOnTopology(); - else if ( strategy == DivideStrategy::LargestInterval && - ( ( _smtCore.getStackDepth() + 1 ) % - GlobalConfiguration::INTERVAL_SPLITTING_FREQUENCY != - 0 ) ) - { - // Conduct interval splitting periodically. - candidatePLConstraint = pickSplitPLConstraintBasedOnIntervalWidth(); - } - ENGINE_LOG( - Stringf( ( candidatePLConstraint ? "Picked..." - : "Unable to pick using the current strategy..." 
) ) - .ascii() ); - return candidatePLConstraint; -} - -PiecewiseLinearConstraint *Engine::pickSplitPLConstraintSnC( SnCDivideStrategy strategy ) -{ - PiecewiseLinearConstraint *candidatePLConstraint = NULL; - if ( strategy == SnCDivideStrategy::Polarity ) - candidatePLConstraint = pickSplitPLConstraintBasedOnPolarity(); - else if ( strategy == SnCDivideStrategy::EarliestReLU ) - candidatePLConstraint = pickSplitPLConstraintBasedOnTopology(); - - ENGINE_LOG( Stringf( "Done updating scores..." ).ascii() ); - ENGINE_LOG( - Stringf( ( candidatePLConstraint ? "Picked..." - : "Unable to pick using the current strategy..." ) ) - .ascii() ); - return candidatePLConstraint; -} - -bool Engine::restoreSmtState( SmtState &smtState ) -{ - try - { - ASSERT( _smtCore.getStackDepth() == 0 ); - - // Step 1: all implied valid splits at root - for ( auto &validSplit : smtState._impliedValidSplitsAtRoot ) - { - applySplit( validSplit ); - _smtCore.recordImpliedValidSplit( validSplit ); - } - - tightenBoundsOnConstraintMatrix(); - _boundManager.propagateTightenings(); - // For debugging purposes - checkBoundCompliancyWithDebugSolution(); - do - performSymbolicBoundTightening(); - while ( applyAllValidConstraintCaseSplits() ); - - // Step 2: replay the stack - for ( auto &stackEntry : smtState._stack ) - { - _smtCore.replaySmtStackEntry( stackEntry ); - // Do all the bound propagation, and set ReLU constraints to inactive (at - // least the one corresponding to the _activeSplit applied above. - tightenBoundsOnConstraintMatrix(); - - // For debugging purposes - checkBoundCompliancyWithDebugSolution(); - do - performSymbolicBoundTightening(); - while ( applyAllValidConstraintCaseSplits() ); - } - _boundManager.propagateTightenings(); - } - catch ( const InfeasibleQueryException & ) - { - // The current query is unsat, and we need to pop. - // If we're at level 0, the whole query is unsat. - if ( _produceUNSATProofs ) - explainSimplexFailure(); - - if ( !_smtCore.popSplit() ) - { - if ( _verbosity > 0 ) - { - printf( "\nEngine::solve: UNSAT query\n" ); - _statistics.print(); - } - _exitCode = Engine::UNSAT; - for ( PiecewiseLinearConstraint *p : _plConstraints ) - p->setActiveConstraint( true ); - return false; - } - } - return true; -} - -void Engine::storeSmtState( SmtState &smtState ) -{ - _smtCore.storeSmtState( smtState ); -} - -bool Engine::solveWithMILPEncoding( double timeoutInSeconds ) -{ - try - { - if ( _lpSolverType == LPSolverType::NATIVE && _tableau->basisMatrixAvailable() ) - { - explicitBasisBoundTightening(); - applyAllBoundTightenings(); - applyAllValidConstraintCaseSplits(); - } - - while ( applyAllValidConstraintCaseSplits() ) - { - performSymbolicBoundTightening(); - } - } - catch ( const InfeasibleQueryException & ) - { - _exitCode = Engine::UNSAT; - return false; - } - - ENGINE_LOG( "Encoding the input query with Gurobi...\n" ); - _gurobi = std::unique_ptr( new GurobiWrapper() ); - _tableau->setGurobi( &( *_gurobi ) ); - _milpEncoder = std::unique_ptr( new MILPEncoder( *_tableau ) ); - _milpEncoder->encodeQuery( *_gurobi, *_preprocessedQuery ); - ENGINE_LOG( "Query encoded in Gurobi...\n" ); - - double timeoutForGurobi = ( timeoutInSeconds == 0 ? 
FloatUtils::infinity() : timeoutInSeconds );
-    ENGINE_LOG( Stringf( "Gurobi timeout set to %f\n", timeoutForGurobi ).ascii() );
-    _gurobi->setTimeLimit( timeoutForGurobi );
-    if ( !_sncMode )
-        _gurobi->setNumberOfThreads( Options::get()->getInt( Options::NUM_WORKERS ) );
-    _gurobi->setVerbosity( _verbosity > 0 );
-    _gurobi->solve();
-
-    if ( _gurobi->haveFeasibleSolution() )
-    {
-        if ( allNonlinearConstraintsHold() )
-        {
-            _exitCode = IEngine::SAT;
-            return true;
-        }
-        else
-        {
-            _exitCode = IEngine::UNKNOWN;
-            return false;
-        }
-    }
-    else if ( _gurobi->infeasible() )
-        _exitCode = IEngine::UNSAT;
-    else if ( _gurobi->timeout() )
-        _exitCode = IEngine::TIMEOUT;
-    else
-        throw NLRError( NLRError::UNEXPECTED_RETURN_STATUS_FROM_GUROBI );
-    return false;
-}
-
-bool Engine::preprocessingEnabled() const
-{
-    return _preprocessingEnabled;
-}
-
-Preprocessor *Engine::getPreprocessor()
-{
-    return &_preprocessor;
-}
-
-bool Engine::performDeepSoILocalSearch()
-{
-    ENGINE_LOG( "Performing local search..." );
-    struct timespec start = TimeUtils::sampleMicro();
-    ASSERT( allVarsWithinBounds() );
-
-    // All the linear constraints have been satisfied at this point.
-    // Update the cost function
-    _soiManager->initializePhasePattern();
-
-    LinearExpression initialPhasePattern = _soiManager->getCurrentSoIPhasePattern();
-
-    if ( initialPhasePattern.isZero() )
-    {
-        if ( hasBranchingCandidate() )
-            while ( !_smtCore.needToSplit() )
-                _smtCore.reportRejectedPhasePatternProposal();
-        return false;
-    }
-
-    minimizeHeuristicCost( initialPhasePattern );
-    ASSERT( allVarsWithinBounds() );
-    _soiManager->updateCurrentPhasePatternForSatisfiedPLConstraints();
-    // Always accept the first phase pattern.
-    _soiManager->acceptCurrentPhasePattern();
-    double costOfLastAcceptedPhasePattern =
-        computeHeuristicCost( _soiManager->getCurrentSoIPhasePattern() );
-
-    double costOfProposedPhasePattern = FloatUtils::infinity();
-    bool lastProposalAccepted = true;
-    while ( !_smtCore.needToSplit() )
-    {
-        struct timespec end = TimeUtils::sampleMicro();
-        _statistics.incLongAttribute( Statistics::TOTAL_TIME_LOCAL_SEARCH_MICRO,
-                                      TimeUtils::timePassed( start, end ) );
-        start = end;
-
-        if ( lastProposalAccepted )
-        {
-            /*
-              Check whether the optimal solution to the last accepted phase
-              pattern is a real solution. We only check this when the last
-              proposal was accepted, because a rejected phase pattern must
-              have resulted in an increase in the SoI cost.
-
-              HW: Another option is to only do this check when
-              costOfLastAcceptedPhasePattern is 0, but this might be too strict.
-              The overhead is low anyway.
-            */
-            collectViolatedPlConstraints();
-            if ( allPlConstraintsHold() )
-            {
-                if ( _lpSolverType == LPSolverType::NATIVE &&
-                     _tableau->getBasicAssignmentStatus() !=
-                         ITableau::BASIC_ASSIGNMENT_JUST_COMPUTED )
-                {
-                    if ( _verbosity > 0 )
-                    {
-                        printf( "Before declaring sat, recomputing...\n" );
-                    }
-                    // Make sure that the assignment is precise before declaring success
-                    _tableau->computeAssignment();
-                    // If we actually have a real satisfying assignment,
-                    // the next round of this check will succeed and return true
-                    return false;
-                }
-                else
-                {
-                    ENGINE_LOG( "Performing local search - done." );
-                    return true;
-                }
-            }
-            else if ( FloatUtils::isZero( costOfLastAcceptedPhasePattern ) )
-            {
-                // Corner case: the SoI is minimal but there are still some PL
-                // constraints (those not in the SoI) unsatisfied.
-                // In this case, we bump up the score of PLConstraints not in
-                // the SoI with the hope to branch on them early.
- bumpUpPseudoImpactOfPLConstraintsNotInSoI(); - if ( hasBranchingCandidate() ) - while ( !_smtCore.needToSplit() ) - _smtCore.reportRejectedPhasePatternProposal(); - return false; - } - } - - // No satisfying assignment found for the last accepted phase pattern, - // propose an update to it. - _soiManager->proposePhasePatternUpdate(); - minimizeHeuristicCost( _soiManager->getCurrentSoIPhasePattern() ); - _soiManager->updateCurrentPhasePatternForSatisfiedPLConstraints(); - costOfProposedPhasePattern = - computeHeuristicCost( _soiManager->getCurrentSoIPhasePattern() ); - - // We have the "local" effect of change the cost term of some - // PLConstraints in the phase pattern. Use this information to influence - // the branching decision. - updatePseudoImpactWithSoICosts( costOfLastAcceptedPhasePattern, - costOfProposedPhasePattern ); - - // Decide whether to accept the last proposal. - if ( _soiManager->decideToAcceptCurrentProposal( costOfLastAcceptedPhasePattern, - costOfProposedPhasePattern ) ) - { - _soiManager->acceptCurrentPhasePattern(); - costOfLastAcceptedPhasePattern = costOfProposedPhasePattern; - lastProposalAccepted = true; - } - else - { - _smtCore.reportRejectedPhasePatternProposal(); - lastProposalAccepted = false; - } - } - - ENGINE_LOG( "Performing local search - done" ); - return false; -} - -void Engine::minimizeHeuristicCost( const LinearExpression &heuristicCost ) -{ - ENGINE_LOG( "Optimizing w.r.t. the current heuristic cost..." ); - - if ( _lpSolverType == LPSolverType::GUROBI ) - { - minimizeCostWithGurobi( heuristicCost ); - - ENGINE_LOG( - Stringf( "Current heuristic cost: %f", _gurobi->getOptimalCostOrObjective() ).ascii() ); - } - else - { - _tableau->toggleOptimization( true ); - - _heuristicCost = heuristicCost; - - bool localOptimumReached = false; - while ( !localOptimumReached ) - { - DEBUG( _tableau->verifyInvariants() ); - - mainLoopStatistics(); - if ( _verbosity > 1 && - _statistics.getLongAttribute( Statistics::NUM_MAIN_LOOP_ITERATIONS ) % - _statisticsPrintingFrequency == - 0 ) - _statistics.print(); - - if ( !allVarsWithinBounds() ) - throw VariableOutOfBoundDuringOptimizationException(); - - if ( performPrecisionRestorationIfNeeded() ) - continue; - - ASSERT( allVarsWithinBounds() ); - - localOptimumReached = performSimplexStep(); - } - _tableau->toggleOptimization( false ); - ENGINE_LOG( Stringf( "Current heuristic cost: %f", computeHeuristicCost( heuristicCost ) ) - .ascii() ); - } - - ENGINE_LOG( "Optimizing w.r.t. the current heuristic cost - done\n" ); -} - -double Engine::computeHeuristicCost( const LinearExpression &heuristicCost ) -{ - return ( _costFunctionManager->computeGivenCostFunctionDirectly( heuristicCost._addends ) + - heuristicCost._constant ); -} - -void Engine::updatePseudoImpactWithSoICosts( double costOfLastAcceptedPhasePattern, - double costOfProposedPhasePattern ) -{ - ASSERT( _soiManager ); - - const List &constraintsUpdated = - _soiManager->getConstraintsUpdatedInLastProposal(); - // Score is divided by the number of updated constraints in the last - // proposal. In the Sum of Infeasibilities paper, only one constraint - // is updated each time. But we might consider alternative proposal - // strategy in the future. - double score = ( fabs( costOfLastAcceptedPhasePattern - costOfProposedPhasePattern ) / - constraintsUpdated.size() ); - - ASSERT( constraintsUpdated.size() > 0 ); - // Update the Pseudo-Impact estimation. 
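-    // Worked example (illustrative numbers): with an accepted cost of 5.0,
-    // a proposed cost of 3.5, and a single constraint updated in the
-    // proposal, that constraint is credited fabs( 5.0 - 3.5 ) / 1 = 1.5.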
- for ( const auto &constraint : constraintsUpdated ) - _smtCore.updatePLConstraintScore( constraint, score ); -} - -void Engine::bumpUpPseudoImpactOfPLConstraintsNotInSoI() -{ - ASSERT( _soiManager ); - for ( const auto &plConstraint : _plConstraints ) - { - if ( plConstraint->isActive() && !plConstraint->supportSoI() && - !plConstraint->phaseFixed() && !plConstraint->satisfied() ) - _smtCore.updatePLConstraintScore( - plConstraint, GlobalConfiguration::SCORE_BUMP_FOR_PL_CONSTRAINTS_NOT_IN_SOI ); - } -} - -void Engine::informLPSolverOfBounds() -{ - if ( _lpSolverType == LPSolverType::GUROBI ) - { - struct timespec start = TimeUtils::sampleMicro(); - for ( unsigned i = 0; i < _preprocessedQuery->getNumberOfVariables(); ++i ) - { - String variableName = _milpEncoder->getVariableNameFromVariable( i ); - _gurobi->setLowerBound( variableName, _tableau->getLowerBound( i ) ); - _gurobi->setUpperBound( variableName, _tableau->getUpperBound( i ) ); - } - _gurobi->updateModel(); - struct timespec end = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TIME_ADDING_CONSTRAINTS_TO_MILP_SOLVER_MICRO, - TimeUtils::timePassed( start, end ) ); - } - else - { - // Bounds are already up-to-date in Tableau when using native Simplex. - return; - } -} - -bool Engine::minimizeCostWithGurobi( const LinearExpression &costFunction ) -{ - ASSERT( _gurobi && _milpEncoder ); - - struct timespec simplexStart = TimeUtils::sampleMicro(); - - _milpEncoder->encodeCostFunction( *_gurobi, costFunction ); - _gurobi->setTimeLimit( FloatUtils::infinity() ); - _gurobi->solve(); - - struct timespec simplexEnd = TimeUtils::sampleMicro(); - - _statistics.incLongAttribute( Statistics::TIME_SIMPLEX_STEPS_MICRO, - TimeUtils::timePassed( simplexStart, simplexEnd ) ); - _statistics.incLongAttribute( Statistics::NUM_SIMPLEX_STEPS, - _gurobi->getNumberOfSimplexIterations() ); - - if ( _gurobi->infeasible() ) - throw InfeasibleQueryException(); - else if ( _gurobi->optimal() ) - return true; - else - throw CommonError( CommonError::UNEXPECTED_GUROBI_STATUS, - Stringf( "Current status: %u", _gurobi->getStatusCode() ).ascii() ); - - return false; -} - -void Engine::checkGurobiBoundConsistency() const -{ - if ( _gurobi && _milpEncoder ) - { - for ( unsigned i = 0; i < _preprocessedQuery->getNumberOfVariables(); ++i ) - { - String iName = _milpEncoder->getVariableNameFromVariable( i ); - double gurobiLowerBound = _gurobi->getLowerBound( iName ); - double lowerBound = _tableau->getLowerBound( i ); - if ( !FloatUtils::areEqual( gurobiLowerBound, lowerBound ) ) - { - throw MarabouError( MarabouError::BOUNDS_NOT_UP_TO_DATE_IN_LP_SOLVER, - Stringf( "x%u lower bound inconsistent!" - " Gurobi: %f, Tableau: %f", - i, - gurobiLowerBound, - lowerBound ) - .ascii() ); - } - double gurobiUpperBound = _gurobi->getUpperBound( iName ); - double upperBound = _tableau->getUpperBound( i ); - - if ( !FloatUtils::areEqual( gurobiUpperBound, upperBound ) ) - { - throw MarabouError( MarabouError::BOUNDS_NOT_UP_TO_DATE_IN_LP_SOLVER, - Stringf( "x%u upper bound inconsistent!" 
- " Gurobi: %f, Tableau: %f", - i, - gurobiUpperBound, - upperBound ) - .ascii() ); - } - } - } -} - -bool Engine::consistentBounds() const -{ - return _boundManager.consistentBounds(); -} - -Query Engine::buildQueryFromCurrentState() const -{ - Query query = *_preprocessedQuery; - for ( unsigned i = 0; i < query.getNumberOfVariables(); ++i ) - { - query.setLowerBound( i, _tableau->getLowerBound( i ) ); - query.setUpperBound( i, _tableau->getUpperBound( i ) ); - } - return query; -} - -void Engine::updateGroundUpperBound( const unsigned var, const double value ) -{ - ASSERT( var < _tableau->getN() && _produceUNSATProofs ); - if ( FloatUtils::lt( value, _groundBoundManager.getUpperBound( var ) ) ) - _groundBoundManager.setUpperBound( var, value ); -} - -void Engine::updateGroundLowerBound( const unsigned var, const double value ) -{ - ASSERT( var < _tableau->getN() && _produceUNSATProofs ); - if ( FloatUtils::gt( value, _groundBoundManager.getLowerBound( var ) ) ) - _groundBoundManager.setLowerBound( var, value ); -} - -double Engine::getGroundBound( unsigned var, bool isUpper ) const -{ - ASSERT( var < _tableau->getN() && _produceUNSATProofs ); - return isUpper ? _groundBoundManager.getUpperBound( var ) - : _groundBoundManager.getLowerBound( var ); -} - -bool Engine::shouldProduceProofs() const -{ - return _produceUNSATProofs; -} - -void Engine::explainSimplexFailure() -{ - ASSERT( _produceUNSATProofs ); - - DEBUG( checkGroundBounds() ); - - unsigned infeasibleVar = _boundManager.getInconsistentVariable(); - - if ( infeasibleVar == IBoundManager::NO_VARIABLE_FOUND || - !certifyInfeasibility( infeasibleVar ) ) - infeasibleVar = explainFailureWithTableau(); - - if ( infeasibleVar == IBoundManager::NO_VARIABLE_FOUND ) - infeasibleVar = explainFailureWithCostFunction(); - - if ( infeasibleVar == IBoundManager::NO_VARIABLE_FOUND ) - { - _costFunctionManager->computeCoreCostFunction(); - infeasibleVar = explainFailureWithCostFunction(); - } - - if ( infeasibleVar == IBoundManager::NO_VARIABLE_FOUND ) - { - markLeafToDelegate(); - return; - } - - ASSERT( infeasibleVar < _tableau->getN() ); - ASSERT( _UNSATCertificateCurrentPointer && - !( **_UNSATCertificateCurrentPointer ).getContradiction() ); - _statistics.incUnsignedAttribute( Statistics::NUM_CERTIFIED_LEAVES ); - - writeContradictionToCertificate( infeasibleVar ); - - ( **_UNSATCertificateCurrentPointer ).makeLeaf(); -} - -bool Engine::certifyInfeasibility( unsigned var ) const -{ - ASSERT( _produceUNSATProofs ); - - Vector contradiction = computeContradiction( var ); - - if ( contradiction.empty() ) - return FloatUtils::isNegative( _groundBoundManager.getUpperBound( var ) - - _groundBoundManager.getLowerBound( var ) ); - - SparseUnsortedList sparseContradiction = SparseUnsortedList(); - - contradiction.empty() - ? 
sparseContradiction.initializeToEmpty() - : sparseContradiction.initialize( contradiction.data(), contradiction.size() ); - - // In case contradiction is a vector of zeros - if ( sparseContradiction.empty() ) - return FloatUtils::isNegative( _groundBoundManager.getUpperBound( var ) - - _groundBoundManager.getLowerBound( var ) ); - - double derivedBound = - UNSATCertificateUtils::computeCombinationUpperBound( sparseContradiction, - _tableau->getSparseA(), - _groundBoundManager.getUpperBounds(), - _groundBoundManager.getLowerBounds(), - _tableau->getN() ); - return FloatUtils::isNegative( derivedBound ); -} - -double Engine::explainBound( unsigned var, bool isUpper ) const -{ - ASSERT( _produceUNSATProofs ); - - SparseUnsortedList explanation( 0 ); - - if ( !_boundManager.isExplanationTrivial( var, isUpper ) ) - explanation = _boundManager.getExplanation( var, isUpper ); - - if ( explanation.empty() ) - return isUpper ? _groundBoundManager.getUpperBound( var ) - : _groundBoundManager.getLowerBound( var ); - - return UNSATCertificateUtils::computeBound( var, - isUpper, - explanation, - _tableau->getSparseA(), - _groundBoundManager.getUpperBounds(), - _groundBoundManager.getLowerBounds(), - _tableau->getN() ); -} - -bool Engine::validateBounds( unsigned var, double epsilon, bool isUpper ) const -{ - ASSERT( _produceUNSATProofs ); - - double explained, real; - explained = explainBound( var, isUpper ); - if ( isUpper ) - { - real = _boundManager.getUpperBound( var ); - if ( explained - real > epsilon ) - { - ENGINE_LOG( "Var %d. Computed Upper %.5lf, real %.5lf. Difference is %.10lf\n", - var, - explained, - real, - abs( explained - real ) ); - return false; - } - } - else - { - real = _boundManager.getLowerBound( var ); - if ( explained - real < -epsilon ) - { - ENGINE_LOG( "Var %d. Computed Lower %.5lf, real %.5lf. 
Difference is %.10lf\n", - var, - explained, - real, - abs( explained - real ) ); - return false; - } - } - return true; -} - -bool Engine::validateAllBounds( double epsilon ) const -{ - ASSERT( _produceUNSATProofs ); - - bool res = true; - - for ( unsigned var = 0; var < _tableau->getN(); ++var ) - if ( !validateBounds( var, epsilon, Tightening::UB ) || - !validateBounds( var, epsilon, Tightening::LB ) ) - res = false; - - return res; -} - -bool Engine::checkGroundBounds() const -{ - ASSERT( _produceUNSATProofs ); - - for ( unsigned i = 0; i < _tableau->getN(); ++i ) - { - if ( FloatUtils::gt( _groundBoundManager.getLowerBound( i ), - _boundManager.getLowerBound( i ) ) || - FloatUtils::lt( _groundBoundManager.getUpperBound( i ), - _boundManager.getUpperBound( i ) ) ) - return false; - } - return true; -} - -unsigned Engine::explainFailureWithTableau() -{ - ASSERT( _produceUNSATProofs ); - - // Failure of a simplex step implies infeasible bounds imposed by the row - TableauRow boundUpdateRow = TableauRow( _tableau->getN() ); - - // For every basic, check that is has no slack and its explanations indeed prove a - // contradiction - unsigned basicVar; - - for ( unsigned i = 0; i < _tableau->getM(); ++i ) - { - if ( _tableau->basicOutOfBounds( i ) ) - { - _tableau->getTableauRow( i, &boundUpdateRow ); - basicVar = boundUpdateRow._lhs; - - if ( FloatUtils::gt( _boundManager.computeRowBound( boundUpdateRow, Tightening::LB ), - _boundManager.getUpperBound( basicVar ) ) && - explainAndCheckContradiction( basicVar, Tightening::LB, &boundUpdateRow ) ) - return basicVar; - - if ( FloatUtils::lt( _boundManager.computeRowBound( boundUpdateRow, Tightening::UB ), - _boundManager.getLowerBound( basicVar ) ) && - explainAndCheckContradiction( basicVar, Tightening::UB, &boundUpdateRow ) ) - return basicVar; - } - } - - return IBoundManager::NO_VARIABLE_FOUND; -} - -unsigned Engine::explainFailureWithCostFunction() -{ - ASSERT( _produceUNSATProofs ); - - // Failure of a simplex step might imply infeasible bounds imposed by the cost function - unsigned curBasicVar; - unsigned infVar = IBoundManager::NO_VARIABLE_FOUND; - double curCost; - bool curUpper; - const SparseUnsortedList *costRow = _costFunctionManager->createRowOfCostFunction(); - - for ( unsigned i = 0; i < _tableau->getM(); ++i ) - { - curBasicVar = _tableau->basicIndexToVariable( i ); - curCost = _costFunctionManager->getBasicCost( i ); - - if ( FloatUtils::isZero( curCost ) ) - continue; - - curUpper = ( curCost < 0 ); - - // Check the basic variable has no slack - if ( !( !curUpper && FloatUtils::gt( _boundManager.computeSparseRowBound( - *costRow, Tightening::LB, curBasicVar ), - _boundManager.getUpperBound( curBasicVar ) ) ) && - !( curUpper && FloatUtils::lt( _boundManager.computeSparseRowBound( - *costRow, Tightening::UB, curBasicVar ), - _boundManager.getLowerBound( curBasicVar ) ) ) ) - - continue; - - // Check the explanation indeed proves a contradiction - if ( explainAndCheckContradiction( curBasicVar, curUpper, costRow ) ) - { - infVar = curBasicVar; - break; - } - } - - delete costRow; - return infVar; -} - -bool Engine::explainAndCheckContradiction( unsigned var, bool isUpper, const TableauRow *row ) -{ - ASSERT( _produceUNSATProofs ); - - SparseUnsortedList backup( 0 ); - backup = _boundManager.getExplanation( var, isUpper ); - - _boundManager.updateBoundExplanation( *row, isUpper, var ); - - // Ensure the proof is correct - if ( certifyInfeasibility( var ) ) - return true; - - // If not, restores previous certificate if the proof 
is wrong - _boundManager.setExplanation( backup, var, isUpper ); - - return false; -} - -bool Engine::explainAndCheckContradiction( unsigned var, - bool isUpper, - const SparseUnsortedList *row ) -{ - ASSERT( _produceUNSATProofs ); - - SparseUnsortedList backup( 0 ); - backup = _boundManager.getExplanation( var, isUpper ); - - _boundManager.updateBoundExplanationSparse( *row, isUpper, var ); - - // Ensure the proof is correct - if ( certifyInfeasibility( var ) ) - return true; - - // If not, restores previous certificate if the proof is wrong - _boundManager.setExplanation( backup, var, isUpper ); - - return false; -} - -UnsatCertificateNode *Engine::getUNSATCertificateCurrentPointer() const -{ - return _UNSATCertificateCurrentPointer->get(); -} - -void Engine::setUNSATCertificateCurrentPointer( UnsatCertificateNode *node ) -{ - _UNSATCertificateCurrentPointer->set( node ); -} - -const UnsatCertificateNode *Engine::getUNSATCertificateRoot() const -{ - return _UNSATCertificate; -} - -bool Engine::certifyUNSATCertificate() -{ - ASSERT( _produceUNSATProofs && _UNSATCertificate && !_smtCore.getStackDepth() ); - - for ( auto &constraint : _plConstraints ) - { - if ( !UNSATCertificateUtils::getSupportedActivations().exists( constraint->getType() ) ) - { - String activationType = constraint->serializeToString().tokenize( "," ).back(); - printf( "Certification Error! Network contains activation function %s, that is not yet " - "supported by Marabou certification.\n", - activationType.ascii() ); - return false; - } - } - - struct timespec certificationStart = TimeUtils::sampleMicro(); - _precisionRestorer.restoreInitialEngineState( *this ); - - Vector groundUpperBounds( _tableau->getN(), 0 ); - Vector groundLowerBounds( _tableau->getN(), 0 ); - - for ( unsigned i = 0; i < _tableau->getN(); ++i ) - { - groundUpperBounds[i] = _groundBoundManager.getUpperBound( i ); - groundLowerBounds[i] = _groundBoundManager.getLowerBound( i ); - } - - if ( GlobalConfiguration::WRITE_JSON_PROOF ) - { - File file( JsonWriter::PROOF_FILENAME ); - JsonWriter::writeProofToJson( _UNSATCertificate, - _tableau->getM(), - _tableau->getSparseA(), - groundUpperBounds, - groundLowerBounds, - _plConstraints, - file ); - } - - Checker unsatCertificateChecker( _UNSATCertificate, - _tableau->getM(), - _tableau->getSparseA(), - groundUpperBounds, - groundLowerBounds, - _plConstraints ); - bool certificationSucceeded = unsatCertificateChecker.check(); - - _statistics.setLongAttribute( - Statistics::TOTAL_CERTIFICATION_TIME, - TimeUtils::timePassed( certificationStart, TimeUtils::sampleMicro() ) ); - printf( "Certification time: " ); - _statistics.printLongAttributeAsTime( - _statistics.getLongAttribute( Statistics::TOTAL_CERTIFICATION_TIME ) ); - - if ( certificationSucceeded ) - { - printf( "Certified\n" ); - _statistics.incUnsignedAttribute( Statistics::CERTIFIED_UNSAT ); - if ( _statistics.getUnsignedAttribute( Statistics::NUM_DELEGATED_LEAVES ) ) - printf( "Some leaves were delegated and need to be certified separately by an SMT " - "solver\n" ); - } - else - printf( "Error certifying UNSAT certificate\n" ); - - DEBUG( { - ASSERT( certificationSucceeded ); - if ( _statistics.getUnsignedAttribute( Statistics::NUM_POPS ) ) - { - double delegationRatio = - _statistics.getUnsignedAttribute( Statistics::NUM_DELEGATED_LEAVES ) / - _statistics.getUnsignedAttribute( Statistics::NUM_CERTIFIED_LEAVES ); - ASSERT( FloatUtils::lt( delegationRatio, 0.01 ) ); - } - } ); - - return certificationSucceeded; -} - -void 
Engine::markLeafToDelegate() -{ - ASSERT( _produceUNSATProofs ); - - // Mark leaf with toDelegate Flag - UnsatCertificateNode *currentUnsatCertificateNode = _UNSATCertificateCurrentPointer->get(); - ASSERT( _UNSATCertificateCurrentPointer && !currentUnsatCertificateNode->getContradiction() ); - currentUnsatCertificateNode->setDelegationStatus( DelegationStatus::DELEGATE_SAVE ); - currentUnsatCertificateNode->deletePLCExplanations(); - _statistics.incUnsignedAttribute( Statistics::NUM_DELEGATED_LEAVES ); - - if ( !currentUnsatCertificateNode->getChildren().empty() ) - currentUnsatCertificateNode->makeLeaf(); -} - -const Vector Engine::computeContradiction( unsigned infeasibleVar ) const -{ - ASSERT( _produceUNSATProofs ); - - unsigned m = _tableau->getM(); - SparseUnsortedList upperBoundExplanation( 0 ); - SparseUnsortedList lowerBoundExplanation( 0 ); - - if ( !_boundManager.isExplanationTrivial( infeasibleVar, Tightening::UB ) ) - upperBoundExplanation = _boundManager.getExplanation( infeasibleVar, Tightening::UB ); - - if ( !_boundManager.isExplanationTrivial( infeasibleVar, Tightening::LB ) ) - lowerBoundExplanation = _boundManager.getExplanation( infeasibleVar, Tightening::LB ); - - if ( upperBoundExplanation.empty() && lowerBoundExplanation.empty() ) - return Vector( 0 ); - - Vector contradiction = Vector( m, 0 ); - - if ( !upperBoundExplanation.empty() ) - for ( const auto &entry : upperBoundExplanation ) - contradiction[entry._index] = entry._value; - - if ( !lowerBoundExplanation.empty() ) - for ( const auto &entry : lowerBoundExplanation ) - contradiction[entry._index] -= entry._value; - - return contradiction; -} - -void Engine::writeContradictionToCertificate( unsigned infeasibleVar ) const -{ - ASSERT( _produceUNSATProofs ); - - Vector leafContradictionVec = computeContradiction( infeasibleVar ); - - Contradiction *leafContradiction = leafContradictionVec.empty() - ? new Contradiction( infeasibleVar ) - : new Contradiction( leafContradictionVec ); - ( **_UNSATCertificateCurrentPointer ).setContradiction( leafContradiction ); -} - -const BoundExplainer *Engine::getBoundExplainer() const -{ - return _boundManager.getBoundExplainer(); -} - -void Engine::setBoundExplainerContent( BoundExplainer *boundExplainer ) -{ - _boundManager.copyBoundExplainerContent( boundExplainer ); -} - -void Engine::propagateBoundManagerTightenings() -{ - _boundManager.propagateTightenings(); -} - -void Engine::extractBounds( IQuery &inputQuery ) -{ - for ( unsigned i = 0; i < inputQuery.getNumberOfVariables(); ++i ) - { - if ( _preprocessingEnabled ) - { - // Has the variable been merged into another? - unsigned variable = i; - while ( _preprocessor.variableIsMerged( variable ) ) - variable = _preprocessor.getMergedIndex( variable ); - - // Symbolically fixed variables are ignored - if ( _preprocessor.variableIsUnusedAndSymbolicallyFixed( i ) ) - { - inputQuery.tightenLowerBound( i, FloatUtils::negativeInfinity() ); - inputQuery.tightenUpperBound( i, FloatUtils::infinity() ); - continue; - } - - // Fixed variables are easy: return the value they've been fixed to. 
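-            // Illustrative example: if preprocessing merged x5 into x2 and
-            // later fixed x2 to 1.3, the merge loop above resolves variable
-            // 5 to 2, and both bounds reported for x5 below become
-            // [1.3, 1.3].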
- if ( _preprocessor.variableIsFixed( variable ) ) - { - inputQuery.tightenLowerBound( i, _preprocessor.getFixedValue( variable ) ); - inputQuery.tightenUpperBound( i, _preprocessor.getFixedValue( variable ) ); - continue; - } - - // We know which variable to look for, but it may have been assigned - // a new index, due to variable elimination - variable = _preprocessor.getNewIndex( variable ); - - inputQuery.tightenLowerBound( i, _preprocessedQuery->getLowerBound( variable ) ); - inputQuery.tightenUpperBound( i, _preprocessedQuery->getUpperBound( variable ) ); - } - else - { - inputQuery.tightenLowerBound( i, _preprocessedQuery->getLowerBound( i ) ); - inputQuery.tightenUpperBound( i, _preprocessedQuery->getUpperBound( i ) ); - } - } -} - -void Engine::addPLCLemma( std::shared_ptr &explanation ) -{ - if ( !_produceUNSATProofs ) - return; - - ASSERT( explanation && _UNSATCertificate && _UNSATCertificateCurrentPointer ) - _statistics.incUnsignedAttribute( Statistics::NUM_LEMMAS ); - _UNSATCertificateCurrentPointer->get()->addPLCLemma( explanation ); -} \ No newline at end of file From 0d51b8a2ce21d16f61ee88bc4c3d644fdda2765b Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 12 Dec 2024 20:58:53 +0200 Subject: [PATCH 34/81] Add files via upload --- src/engine/Engine.cpp | 8 +++++--- src/engine/MILPSolverBoundTighteningType.h | 3 ++- src/engine/PolygonalTightening.h | 23 ++++++++++++---------- 3 files changed, 20 insertions(+), 14 deletions(-) diff --git a/src/engine/Engine.cpp b/src/engine/Engine.cpp index 645b691ca3..e137c3cbb9 100644 --- a/src/engine/Engine.cpp +++ b/src/engine/Engine.cpp @@ -1579,9 +1579,11 @@ void Engine::performMILPSolverBoundedTightening( Query *inputQuery ) // TODO: Remove this block after getting ready to support sigmoid with MILP Bound // Tightening. 
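    // For example, a query that still contains a Sigmoid constraint after
    // preprocessing is rejected below under the three MILP-encoding-based
    // tightening modes, while any mode not listed in the condition (e.g.
    // MILPSolverBoundTighteningType::NONE) falls through this guard.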
- if ( ( _milpSolverBoundTighteningType == MILPSolverBoundTighteningType::MILP_ENCODING || - _milpSolverBoundTighteningType == MILPSolverBoundTighteningType::MILP_ENCODING_INCREMENTAL || - _milpSolverBoundTighteningType == MILPSolverBoundTighteningType::ITERATIVE_PROPAGATION ) && + if ( ( _milpSolverBoundTighteningType == MILPSolverBoundTighteningType::MILP_ENCODING || + _milpSolverBoundTighteningType == + MILPSolverBoundTighteningType::MILP_ENCODING_INCREMENTAL || + _milpSolverBoundTighteningType == + MILPSolverBoundTighteningType::ITERATIVE_PROPAGATION ) && _preprocessedQuery->getNonlinearConstraints().size() > 0 ) throw MarabouError( MarabouError::FEATURE_NOT_YET_SUPPORTED, "Marabou doesn't support sigmoid with MILP Bound Tightening" ); diff --git a/src/engine/MILPSolverBoundTighteningType.h b/src/engine/MILPSolverBoundTighteningType.h index ad839706f7..760f82e8d1 100644 --- a/src/engine/MILPSolverBoundTighteningType.h +++ b/src/engine/MILPSolverBoundTighteningType.h @@ -36,7 +36,8 @@ enum class MILPSolverBoundTighteningType { BACKWARD_ANALYSIS_CONVERGE = 6, // Perform backward analysis using the INVPROP Algorithm (arXiv:2302.01404v4 [cs.LG]) BACKWARD_ANALYSIS_INVPROP = 7, - // Perform backward analysis using the PreimageApproximation Algorithm (arXiv:2305.03686v4 [cs.SE]) + // Perform backward analysis using the PreimageApproximation Algorithm (arXiv:2305.03686v4 + // [cs.SE]) BACKWARD_ANALYSIS_PREIMAGE_APPROX = 8, // Option to have no MILP bound tightening performed NONE = 10, diff --git a/src/engine/PolygonalTightening.h b/src/engine/PolygonalTightening.h index 538ed9cee0..0f811f6fe6 100644 --- a/src/engine/PolygonalTightening.h +++ b/src/engine/PolygonalTightening.h @@ -16,9 +16,10 @@ #ifndef __PolygonalTightening_h__ #define __PolygonalTightening_h__ -#include -#include "NeuronIndex.h" #include "Map.h" +#include "NeuronIndex.h" + +#include class PolygonalTightening { @@ -26,9 +27,11 @@ class PolygonalTightening enum PolygonalBoundType { LB = 0, UB = 1, - }; + }; - PolygonalTightening( Map neuronToCoefficient, double value, PolygonalBoundType type ) + PolygonalTightening( Map neuronToCoefficient, + double value, + PolygonalBoundType type ) : _neuronToCoefficient( neuronToCoefficient ) , _value( value ) , _type( type ) @@ -62,20 +65,20 @@ class PolygonalTightening bool currentFound = false; for ( const auto &otherPair : other._neuronToCoefficient ) { - currentFound |= ( pair.first._layer == otherPair.first._layer - && pair.first._neuron == otherPair.first._neuron - && pair.second == otherPair.second ); + currentFound |= ( pair.first._layer == otherPair.first._layer && + pair.first._neuron == otherPair.first._neuron && + pair.second == otherPair.second ); } allFound &= currentFound; } bool result = allFound && _value == other._value && _type == other._type; - return result; + return result; } void dump() const { printf( "PolygonalTightening: x %s %.2lf\n", _type == LB ? 
">=" : "<=", _value ); - + for ( const auto &pair : _neuronToCoefficient ) { printf( "NeuronIndex: (layer %u, neuron %u), Coefficient: %.2lf )\n", @@ -94,4 +97,4 @@ class PolygonalTightening // tags-file-name: "../../TAGS" // c-basic-offset: 4 // End: -//s +// s From 32474bce5dfd329856d9b5c4133771ecb1599a7a Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 12 Dec 2024 21:00:34 +0200 Subject: [PATCH 35/81] Add files via upload --- src/nlr/tests/Test_NetworkLevelReasoner.h | 2937 +++++++++++---------- 1 file changed, 1471 insertions(+), 1466 deletions(-) diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h index 0c16ae1601..977faa4ed6 100644 --- a/src/nlr/tests/Test_NetworkLevelReasoner.h +++ b/src/nlr/tests/Test_NetworkLevelReasoner.h @@ -14,10 +14,10 @@ **/ #include "../../engine/tests/MockTableau.h" // TODO: fix this +#include "DeepPolySoftmaxElement.h" #include "FloatUtils.h" #include "Layer.h" #include "NetworkLevelReasoner.h" -#include "DeepPolySoftmaxElement.h" #include "Options.h" #include "Query.h" #include "Tightening.h" @@ -188,7 +188,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); } - + void populateNetworkWithAbs( NLR::NetworkLevelReasoner &nlr ) { /* @@ -260,7 +260,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); } - + void populateNetworkWithSign( NLR::NetworkLevelReasoner &nlr ) { /* @@ -332,7 +332,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); } - + void populateNetworkWithRound( NLR::NetworkLevelReasoner &nlr ) { /* @@ -404,7 +404,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); } - + void populateNetworkWithLeakyRelu( NLR::NetworkLevelReasoner &nlr ) { /* @@ -422,7 +422,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - + nlr.getLayer( 2 )->setAlpha( 0.1 ); nlr.getLayer( 4 )->setAlpha( 0.1 ); @@ -479,16 +479,15 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); } - - + void populateNetworkWithMax( NLR::NetworkLevelReasoner &nlr ) { /* a - x b e + x b e g - y c f + y c f d */ @@ -550,7 +549,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); } - + void populateNetworkWithSoftmax( NLR::NetworkLevelReasoner &nlr ) { /* @@ -630,14 +629,14 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); } - + void populateNetworkWithBilinear( NLR::NetworkLevelReasoner &nlr ) { /* a - x b e + x b e g - y c f + y c f d */ @@ -699,7 +698,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); } - + void populateNetworkWithAbsAndRelu( NLR::NetworkLevelReasoner &nlr ) { /* @@ -771,7 +770,7 
@@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); } - + void populateNetworkWithRoundAndSign( NLR::NetworkLevelReasoner &nlr ) { /* @@ -843,7 +842,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); } - + void populateNetworkWithLeakyReluAndSigmoid( NLR::NetworkLevelReasoner &nlr ) { /* @@ -861,7 +860,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - + nlr.getLayer( 2 )->setAlpha( 0.1 ); // Mark layer dependencies @@ -917,7 +916,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); } - + void populateNetworkWithSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr ) { /* @@ -991,7 +990,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); } - + void populateNetworkWithReluAndBilinear( NLR::NetworkLevelReasoner &nlr ) { /* @@ -1059,7 +1058,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); } - + void populateNetworkSBTRelu( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -1124,7 +1123,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); } - + void populateNetworkSBTReluResidual1( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -1192,7 +1191,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 5, -large ); tableau.setUpperBound( 5, large ); } - + void populateNetworkSBTReluResidual2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -1264,7 +1263,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); } - + void populateNetworkSBTReluReindex( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -1365,7 +1364,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); } - + void populateNetworkSBTLeakyReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -1470,7 +1469,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); } - + void populateNetworkSBTSigmoidsAndRound( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -1553,7 +1552,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 9, -large ); tableau.setUpperBound( 9, large ); } - + void populateNetworkSBTMax( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -1623,7 +1622,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 7, -large ); tableau.setUpperBound( 7, large ); } - + void populateNetworkSBTSoftmax( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -1725,7 +1724,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 10, -large ); tableau.setUpperBound( 10, large ); } - + void 
populateNetworkSBTSoftmax2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -1870,7 +1869,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 16, -large ); tableau.setUpperBound( 16, large ); } - + void populateNetworkSBTBilinear( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -1932,7 +1931,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 5, -large ); tableau.setUpperBound( 5, large ); } - + void populateNetworkBackwardReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -1960,7 +1959,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); nlr.addLayer( 4, NLR::Layer::RELU, 2 ); nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - + nlr.getLayer( 2 )->setAlpha( 0.1 ); nlr.getLayer( 4 )->setAlpha( 0.1 ); @@ -2037,7 +2036,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); } - + void populateNetworkBackwardReLU2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -2048,31 +2047,31 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x5 x9 x18 x20 x2 x13 x16 x6 x10 - + x3 = -x0 + x1 x4 = x0 + 2*x1 x5 = x0 + x1 + x2 x6 = 3*x0 - 2*x1 - x2 - + x7 = ReLU( x3 ) x8 = ReLU( x4 ) x9 = ReLU( x5 ) x10 = ReLU( x6 ) - + x11 = 2x7 + x9 - x10 + 2 x12 = -x7 + 2x8 + x10 x13 = x7 - x8 + 2x9 - 2 - + x14 = ReLU( x11 ) x15 = ReLU( x12 ) x16 = ReLU( x13 ) - + x17 = -x14 + x15 - x16 x18 = x14 + x15 + x16 - + x19 = ReLU( x17 ) x20 = ReLU( x18 ) - + x21 = x19 - x20 - 1 */ @@ -2118,7 +2117,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setWeight( 4, 1, 5, 1, 1 ); nlr.setWeight( 4, 2, 5, 0, -1 ); nlr.setWeight( 4, 2, 5, 1, 1 ); - + nlr.setWeight( 6, 0, 7, 0, 1 ); nlr.setWeight( 6, 1, 7, 0, -1 ); @@ -2135,7 +2134,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 1 ); nlr.addActivationSource( 3, 2, 4, 2 ); - + nlr.addActivationSource( 5, 0, 6, 0 ); nlr.addActivationSource( 5, 1, 6, 1 ); @@ -2164,10 +2163,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); // Very loose bounds for neurons except inputs @@ -2213,7 +2212,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 21, -large ); tableau.setUpperBound( 21, large ); } - + void populateNetworkBackwardSigmoid( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -2315,7 +2314,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); } - + void populateNetworkBackwardSigmoid2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -2326,31 +2325,31 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x5 x9 x18 x20 x2 x13 x16 x6 x10 - + x3 = -x0 + x1 x4 = x0 + 2*x1 x5 = x0 + x1 + x2 x6 = 3*x0 - 2*x1 - x2 - + x7 = Sigmoid( x3 ) x8 = Sigmoid( x4 ) x9 = Sigmoid( x5 ) x10 = Sigmoid( x6 ) - + x11 = 2x7 + x9 - x10 + 2 x12 = -x7 + 2x8 + x10 x13 = x7 - x8 + 2x9 - 2 - + x14 = Sigmoid( x11 ) x15 = Sigmoid( x12 ) x16 = 
Sigmoid( x13 ) - + x17 = -x14 + x15 - x16 x18 = x14 + x15 + x16 - + x19 = Sigmoid( x17 ) x20 = Sigmoid( x18 ) - + x21 = x19 - x20 - 1 */ @@ -2396,7 +2395,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setWeight( 4, 1, 5, 1, 1 ); nlr.setWeight( 4, 2, 5, 0, -1 ); nlr.setWeight( 4, 2, 5, 1, 1 ); - + nlr.setWeight( 6, 0, 7, 0, 1 ); nlr.setWeight( 6, 1, 7, 0, -1 ); @@ -2413,7 +2412,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 1 ); nlr.addActivationSource( 3, 2, 4, 2 ); - + nlr.addActivationSource( 5, 0, 6, 0 ); nlr.addActivationSource( 5, 1, 6, 1 ); @@ -2442,10 +2441,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); // Very loose bounds for neurons except inputs @@ -2491,7 +2490,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 21, -large ); tableau.setUpperBound( 21, large ); } - + void populateNetworkBackwardSign( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -2593,7 +2592,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); } - + void populateNetworkBackwardSign2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -2604,31 +2603,31 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x5 x9 x18 x20 x2 x13 x16 x6 x10 - + x3 = -x0 + x1 x4 = x0 + 2*x1 x5 = x0 + x1 + x2 x6 = 3*x0 - 2*x1 - x2 - + x7 = Sign( x3 ) x8 = Sign( x4 ) x9 = SIgn( x5 ) x10 = Sign( x6 ) - + x11 = 2x7 + x9 - x10 + 2 x12 = -x7 + 2x8 + x10 x13 = x7 - x8 + 2x9 - 2 - + x14 = Sign( x11 ) x15 = Sign( x12 ) x16 = Sign( x13 ) - + x17 = -x14 + x15 - x16 x18 = x14 + x15 + x16 - + x19 = Sign( x17 ) x20 = Sign( x18 ) - + x21 = x19 - x20 - 1 */ @@ -2674,7 +2673,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setWeight( 4, 1, 5, 1, 1 ); nlr.setWeight( 4, 2, 5, 0, -1 ); nlr.setWeight( 4, 2, 5, 1, 1 ); - + nlr.setWeight( 6, 0, 7, 0, 1 ); nlr.setWeight( 6, 1, 7, 0, -1 ); @@ -2691,7 +2690,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 1 ); nlr.addActivationSource( 3, 2, 4, 2 ); - + nlr.addActivationSource( 5, 0, 6, 0 ); nlr.addActivationSource( 5, 1, 6, 1 ); @@ -2720,10 +2719,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); // Very loose bounds for neurons except inputs @@ -2769,7 +2768,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 21, -large ); tableau.setUpperBound( 21, large ); } - + void populateNetworkBackwardRound( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -2871,7 +2870,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); } - + void populateNetworkBackwardRound2( 
NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -2882,31 +2881,31 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x5 x9 x18 x20 x2 x13 x16 x6 x10 - + x3 = -x0 + x1 x4 = x0 + 2*x1 x5 = x0 + x1 + x2 x6 = 3*x0 - 2*x1 - x2 - + x7 = Round( x3 ) x8 = Round( x4 ) x9 = Round( x5 ) x10 = Round( x6 ) - + x11 = 2x7 + x9 - x10 + 2 x12 = -x7 + 2x8 + x10 x13 = x7 - x8 + 2x9 - 2 - + x14 = Round( x11 ) x15 = Round( x12 ) x16 = Round( x13 ) - + x17 = -x14 + x15 - x16 x18 = x14 + x15 + x16 - + x19 = Round( x17 ) x20 = Round( x18 ) - + x21 = x19 - x20 - 1 */ @@ -2952,7 +2951,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setWeight( 4, 1, 5, 1, 1 ); nlr.setWeight( 4, 2, 5, 0, -1 ); nlr.setWeight( 4, 2, 5, 1, 1 ); - + nlr.setWeight( 6, 0, 7, 0, 1 ); nlr.setWeight( 6, 1, 7, 0, -1 ); @@ -2969,7 +2968,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 1 ); nlr.addActivationSource( 3, 2, 4, 2 ); - + nlr.addActivationSource( 5, 0, 6, 0 ); nlr.addActivationSource( 5, 1, 6, 1 ); @@ -2998,10 +2997,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); // Very loose bounds for neurons except inputs @@ -3047,7 +3046,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 21, -large ); tableau.setUpperBound( 21, large ); } - + void populateNetworkBackwardAbs( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -3149,7 +3148,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); } - + void populateNetworkBackwardAbs2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -3160,31 +3159,31 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x5 x9 x18 x20 x2 x13 x16 x6 x10 - + x3 = -x0 + x1 x4 = x0 + 2*x1 x5 = x0 + x1 + x2 x6 = 3*x0 - 2*x1 - x2 - + x7 = Abs( x3 ) x8 = Abs( x4 ) x9 = Abs( x5 ) x10 = Abs( x6 ) - + x11 = 2x7 + x9 - x10 + 2 x12 = -x7 + 2x8 + x10 x13 = x7 - x8 + 2x9 - 2 - + x14 = Abs( x11 ) x15 = Abs( x12 ) x16 = Abs( x13 ) - + x17 = -x14 + x15 - x16 x18 = x14 + x15 + x16 - + x19 = Abs( x17 ) x20 = Abs( x18 ) - + x21 = x19 - x20 - 1 */ @@ -3230,7 +3229,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setWeight( 4, 1, 5, 1, 1 ); nlr.setWeight( 4, 2, 5, 0, -1 ); nlr.setWeight( 4, 2, 5, 1, 1 ); - + nlr.setWeight( 6, 0, 7, 0, 1 ); nlr.setWeight( 6, 1, 7, 0, -1 ); @@ -3247,7 +3246,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 1 ); nlr.addActivationSource( 3, 2, 4, 2 ); - + nlr.addActivationSource( 5, 0, 6, 0 ); nlr.addActivationSource( 5, 1, 6, 1 ); @@ -3276,10 +3275,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); // Very loose bounds for neurons except inputs @@ -3325,7 +3324,7 @@ class 
NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 21, -large ); tableau.setUpperBound( 21, large ); } - + void populateNetworkBackwardLeakyReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -3352,7 +3351,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - + nlr.getLayer( 2 )->setAlpha( 0.1 ); nlr.getLayer( 4 )->setAlpha( 0.1 ); @@ -3429,7 +3428,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); } - + void populateNetworkBackwardLeakyRelu2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -3440,31 +3439,31 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x5 x9 x18 x20 x2 x13 x16 x6 x10 - + x3 = -x0 + x1 x4 = x0 + 2*x1 x5 = x0 + x1 + x2 x6 = 3*x0 - 2*x1 - x2 - + x7 = LeakyReLU( x3 ) x8 = LeakyReLU( x4 ) x9 = LeakyReLU( x5 ) x10 = LeakyReLU( x6 ) - + x11 = 2x7 + x9 - x10 + 2 x12 = -x7 + 2x8 + x10 x13 = x7 - x8 + 2x9 - 2 - + x14 = LeakyReLU( x11 ) x15 = LeakyReLU( x12 ) x16 = LeakyReLU( x13 ) - + x17 = -x14 + x15 - x16 x18 = x14 + x15 + x16 - + x19 = LeakyReLU( x17 ) x20 = LeakyReLU( x18 ) - + x21 = x19 - x20 - 1 */ @@ -3477,7 +3476,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); nlr.addLayer( 6, NLR::Layer::LEAKY_RELU, 2 ); nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - + nlr.getLayer( 2 )->setAlpha( 0.1 ); nlr.getLayer( 4 )->setAlpha( 0.1 ); nlr.getLayer( 6 )->setAlpha( 0.1 ); @@ -3514,7 +3513,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setWeight( 4, 1, 5, 1, 1 ); nlr.setWeight( 4, 2, 5, 0, -1 ); nlr.setWeight( 4, 2, 5, 1, 1 ); - + nlr.setWeight( 6, 0, 7, 0, 1 ); nlr.setWeight( 6, 1, 7, 0, -1 ); @@ -3531,7 +3530,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 1 ); nlr.addActivationSource( 3, 2, 4, 2 ); - + nlr.addActivationSource( 5, 0, 6, 0 ); nlr.addActivationSource( 5, 1, 6, 1 ); @@ -3560,10 +3559,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); // Very loose bounds for neurons except inputs @@ -3609,8 +3608,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 21, -large ); tableau.setUpperBound( 21, large ); } - - void populateNetworkBackwardSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + + void populateNetworkBackwardSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) { /* a a' @@ -3708,32 +3708,33 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); } - - - void populateNetworkBackwardSoftmaxAndMax2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + + + void populateNetworkBackwardSoftmaxAndMax2( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) { /* x3 x7 x0 x11 x14 x4 x8 x1 x12 x15 x17 - x5 x9 + x5 x9 x2 x13 x16 x6 x10 - + x3 = -x0 + x1 x4 = x0 + 2*x1 x5 = x0 + x1 + x2 x6 = 3*x0 - 2*x1 - 
x2 - + x7, x8, x9, x10 = Softmax( x2, x3, x4, x5 ) - + x11 = 2x7 + x9 - x10 + 2 x12 = -x7 + 2x8 + x10 x13 = x7 - x8 + 2x9 - 2 - + x14, x15, x16 = Softmax( x11, x12, x13 ) - + x17 = Max( x14, x15, x16 ) */ @@ -3801,7 +3802,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addActivationSource( 3, 0, 4, 2 ); nlr.addActivationSource( 3, 1, 4, 2 ); nlr.addActivationSource( 3, 2, 4, 2 ); - + nlr.addActivationSource( 4, 0, 5, 0 ); nlr.addActivationSource( 4, 1, 5, 0 ); nlr.addActivationSource( 4, 2, 5, 0 ); @@ -3866,8 +3867,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 17, -large ); tableau.setUpperBound( 17, large ); } - - void populateNetworkBackwardReluAndBilinear( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + + void populateNetworkBackwardReluAndBilinear( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) { /* a a' @@ -3959,8 +3961,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); } - - void populateNetworkBackwardReluAndBilinear2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + + void populateNetworkBackwardReluAndBilinear2( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) { /* x3 x7 @@ -3970,23 +3973,23 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x5 x9 x12 x14 x2 x6 x10 - + x3 = -x0 + x1 x4 = x0 + 2*x1 x5 = x0 + x1 + x2 x6 = 3*x0 - 2*x1 - x2 - + x7 = ReLU( x3 ) x8 = ReLU( x4 ) x9 = ReLU( x5 ) x10 = ReLU( x6 ) - + x11 = 2x7 + x9 - x10 + 2 x12 = -x7 + 2x8 + x10 - + x13 = ReLU( x11 ) x14 = ReLU( x12 ) - + x15 = x13 * x14 */ @@ -4031,7 +4034,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 1 ); - + nlr.addActivationSource( 4, 0, 5, 0 ); nlr.addActivationSource( 4, 1, 5, 0 ); @@ -4163,7 +4166,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output[0], 0.5045, 0.0001 ) ); TS_ASSERT( FloatUtils::areEqual( output[1], 2.1957, 0.0001 ) ); } - + void test_evaluate_abs() { NLR::NetworkLevelReasoner nlr; @@ -4200,7 +4203,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); TS_ASSERT( FloatUtils::areEqual( output[1], 10 ) ); } - + void test_evaluate_sign() { NLR::NetworkLevelReasoner nlr; @@ -4237,7 +4240,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output[0], -1 ) ); TS_ASSERT( FloatUtils::areEqual( output[1], -4 ) ); } - + void test_evaluate_round() { NLR::NetworkLevelReasoner nlr; @@ -4274,8 +4277,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output[0], 0, 0.0001 ) ); TS_ASSERT( FloatUtils::areEqual( output[1], -12, 0.0001 ) ); } - - void test_evaluate_leaky_relu() + + void test_evaluate_leaky_relu() { NLR::NetworkLevelReasoner nlr; @@ -4311,7 +4314,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output[0], -0.04, 0.0001 ) ); TS_ASSERT( FloatUtils::areEqual( output[1], -0.76, 0.0001 ) ); } - + void test_evaluate_max() { NLR::NetworkLevelReasoner nlr; @@ -4345,8 +4348,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output[0], -5 ) ); } - - + + void test_evaluate_softmax() { NLR::NetworkLevelReasoner nlr; @@ -4375,12 +4378,12 @@ class NetworkLevelReasonerTestSuite 
: public CxxTest::TestSuite // With Softmax, case 2 input[0] = -3; input[1] = 3; - + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); TS_ASSERT( FloatUtils::areEqual( output[0], 0.1206, 0.0001 ) ); TS_ASSERT( FloatUtils::areEqual( output[1], 2.7588, 0.0001 ) ); } - + void test_evaluate_bilinear() { NLR::NetworkLevelReasoner nlr; @@ -4395,7 +4398,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite input[1] = 0; TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - + TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); // With Bilinear, case 1 @@ -4412,7 +4415,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); TS_ASSERT( FloatUtils::areEqual( output[0], 0.0912, 0.0001 ) ); } - + void test_evaluate_non_consecutive_layers() { NLR::NetworkLevelReasoner nlr; @@ -4487,7 +4490,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite NLR::NetworkLevelReasoner nlr; populateNetworkWithAbsAndRelu( nlr ); - + double input[2]; double output[2]; @@ -4507,13 +4510,13 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); } - + void test_evaluate_round_and_sign() { NLR::NetworkLevelReasoner nlr; - + populateNetworkWithRoundAndSign( nlr ); - + double input[2]; double output[2]; @@ -4530,16 +4533,16 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], -1, 0.0001) ); + TS_ASSERT( FloatUtils::areEqual( output[0], -1, 0.0001 ) ); TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); } - + void test_evaluate_leaky_relu_and_sigmoid() { NLR::NetworkLevelReasoner nlr; - + populateNetworkWithLeakyReluAndSigmoid( nlr ); - + double input[2]; double output[2]; @@ -4559,13 +4562,13 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output[0], 0.4013, 0.0001 ) ); TS_ASSERT( FloatUtils::areEqual( output[1], 0.6508, 0.0001 ) ); } - + void test_evaluate_softmax_and_max() { NLR::NetworkLevelReasoner nlr; - + populateNetworkWithSoftmaxAndMax( nlr ); - + double input[2]; double output[1]; @@ -4583,13 +4586,13 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output[0], -1.0000, 0.0001 ) ); } - + void test_evaluate_relu_and_bilinear() { NLR::NetworkLevelReasoner nlr; - + populateNetworkWithReluAndBilinear( nlr ); - + double input[2]; double output[1]; @@ -4689,7 +4692,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - + void test_store_into_other_with_round() { NLR::NetworkLevelReasoner nlr; @@ -4724,7 +4727,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - + void test_store_into_other_with_sign() { NLR::NetworkLevelReasoner nlr; @@ -4759,7 +4762,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - + void test_store_into_other_with_abs() { NLR::NetworkLevelReasoner nlr; @@ -4794,7 +4797,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 
TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - + void test_store_into_other_with_leaky_relu() { NLR::NetworkLevelReasoner nlr; @@ -4829,7 +4832,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - + void test_store_into_other_with_max() { NLR::NetworkLevelReasoner nlr; @@ -4864,7 +4867,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - + void test_store_into_other_with_softmax() { NLR::NetworkLevelReasoner nlr; @@ -4899,7 +4902,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - + void test_store_into_other_with_bilinear() { NLR::NetworkLevelReasoner nlr; @@ -4934,7 +4937,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - + void test_generate_input_query() { NLR::NetworkLevelReasoner nlr; @@ -5187,7 +5190,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 0.0001 ) ); } } - + void test_simulate_round() { NLR::NetworkLevelReasoner nlr; @@ -5263,7 +5266,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 0.0001 ) ); } } - + void test_simulate_sign() { NLR::NetworkLevelReasoner nlr; @@ -5335,7 +5338,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -4 ) ); } } - + void test_simulate_abs() { NLR::NetworkLevelReasoner nlr; @@ -5407,7 +5410,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 10 ) ); } } - + void test_simulate_leaky_relu() { NLR::NetworkLevelReasoner nlr; @@ -5483,7 +5486,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 0.0001 ) ); } } - + void test_simulate_max() { NLR::NetworkLevelReasoner nlr; @@ -5540,7 +5543,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -5 ) ); } } - + void test_simulate_softmax() { NLR::NetworkLevelReasoner nlr; @@ -5618,7 +5621,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 0.0001 ) ); } } - + void test_simulate_bilinear() { NLR::NetworkLevelReasoner nlr; @@ -5756,9 +5759,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite void test_simulate_abs_and_relu() { NLR::NetworkLevelReasoner nlr; - + populateNetworkWithAbsAndRelu( nlr ); - + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); // Simulate1 @@ -5803,7 +5806,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 4 ) ); } } - + void test_simulate_round_and_sign() { NLR::NetworkLevelReasoner nlr; @@ -5854,7 +5857,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -4 ) ); } } - + void test_simulate_leaky_relu_and_sigmoid() { NLR::NetworkLevelReasoner nlr; @@ -5909,7 +5912,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 0.0001 ) ); } } - + void test_simulate_softmax_and_max() { NLR::NetworkLevelReasoner nlr; @@ -5952,7 +5955,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 0.0001 ) ); } } - + void test_simulate_relu_and_bilinear() { NLR::NetworkLevelReasoner nlr; @@ -5993,7 +5996,7 @@ class 
NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 0 ) ); } } - + void test_interval_arithmetic_bound_propagation_relu_constraints() { NLR::NetworkLevelReasoner nlr; @@ -6326,7 +6329,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - + void test_interval_arithmetic_bound_propagation_sign_constraints() { NLR::NetworkLevelReasoner nlr; @@ -6459,12 +6462,12 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - + void test_interval_arithmetic_bound_propagation_leaky_relu_constraints() { NLR::NetworkLevelReasoner nlr; populateNetworkWithLeakyRelu( nlr ); - + MockTableau tableau; tableau.getBoundManager().initialize( 14 ); @@ -6512,14 +6515,14 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ), Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), - + Tightening( 9, -0.28, Tightening::LB ), Tightening( 9, 10.1, Tightening::UB ), Tightening( 11, -0.38, Tightening::LB ), Tightening( 11, 9.1, Tightening::UB ), @@ -6579,7 +6582,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 7.1, Tightening::UB ), Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), - + Tightening( 9, -0.34, Tightening::LB ), Tightening( 9, 7.1, Tightening::UB ), Tightening( 11, -0.32, Tightening::LB ), Tightening( 11, 7.3, Tightening::UB ), @@ -6590,13 +6593,13 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - + void test_interval_arithmetic_bound_propagation_round_constraints() { NLR::NetworkLevelReasoner nlr; populateNetworkWithRound( nlr ); - - + + MockTableau tableau; tableau.getBoundManager().initialize( 14 ); @@ -6644,14 +6647,14 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), - + Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), - + Tightening( 9, -4, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), Tightening( 11, -7, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), @@ -6704,14 +6707,14 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 2, -2.1, 
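The interval-arithmetic tests that follow all rest on the same affine step. A sketch of the per-neuron computation, as a freestanding helper rather than the NLR's internal code: a positive weight pulls from the source bound on the same side, a negative weight from the opposite side.

// Propagate [lb, ub] intervals through one weighted-sum neuron.
void propagateWeightedSum( const double *weights,
                           const double *lb,
                           const double *ub,
                           unsigned inputs,
                           double bias,
                           double &outLb,
                           double &outUb )
{
    outLb = bias;
    outUb = bias;
    for ( unsigned i = 0; i < inputs; ++i )
    {
        if ( weights[i] >= 0 )
        {
            outLb += weights[i] * lb[i];
            outUb += weights[i] * ub[i];
        }
        else
        {
            outLb += weights[i] * ub[i];
            outUb += weights[i] * lb[i];
        }
    }
}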
Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), - + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), Tightening( 8, -16, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - + Tightening( 9, -16, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), Tightening( 11, -15, Tightening::LB ), Tightening( 11, 2, Tightening::UB ), @@ -6722,13 +6725,13 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - + void test_interval_arithmetic_bound_propagation_sigmoid_constraints() { NLR::NetworkLevelReasoner nlr; populateNetworkWithSigmoids( nlr ); - - + + MockTableau tableau; tableau.getBoundManager().initialize( 14 ); @@ -6776,14 +6779,14 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - + Tightening( 3, 0.5000, Tightening::LB ), Tightening( 3, 0.8808, Tightening::UB ), Tightening( 5, 0.0067, Tightening::LB ), Tightening( 5, 0.9933, Tightening::UB ), Tightening( 7, 0.2689, Tightening::LB ), Tightening( 7, 0.7311, Tightening::UB ), Tightening( 8, -0.2244, Tightening::LB ), Tightening( 8, 1.6052, Tightening::UB ), Tightening( 10, 0.3948, Tightening::LB ), Tightening( 10, 2.2244, Tightening::UB ), - + Tightening( 9, 0.4441, Tightening::LB ), Tightening( 9, 0.8327, Tightening::UB ), Tightening( 11, 0.5974, Tightening::LB ), Tightening( 11, 0.9024, Tightening::UB ), @@ -6836,14 +6839,14 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - + Tightening( 3, 0.1192, Tightening::LB ), Tightening( 3, 0.8808, Tightening::UB ), Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9933, Tightening::UB ), Tightening( 7, 0.2689, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), Tightening( 8, -0.7616, Tightening::LB ), Tightening( 8, 1.6052, Tightening::UB ), Tightening( 10, 0.2384, Tightening::LB ), Tightening( 10, 2.6052, Tightening::UB ), - + Tightening( 9, 0.3183, Tightening::LB ), Tightening( 9, 0.8327, Tightening::UB ), Tightening( 11, 0.5593, Tightening::LB ), Tightening( 11, 0.9312, Tightening::UB ), @@ -6854,13 +6857,13 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - + void test_interval_arithmetic_bound_propagation_max_constraints() { NLR::NetworkLevelReasoner nlr; populateNetworkWithMax( nlr ); - - + + MockTableau tableau; tableau.getBoundManager().initialize( 12 ); @@ -6905,21 +6908,21 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), Tightening( 4, -3, 
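The sigmoid intervals above follow from monotonicity alone: applying the activation to the endpoints gives the output bounds, e.g. an input of [-5, 5] yields [0.0067, 0.9933] as in the x5 tightenings. A standalone sketch:

#include <cmath>

inline double sigmoid( double x )
{
    return 1.0 / ( 1.0 + std::exp( -x ) );
}

// Sigmoid is strictly increasing, so endpoints map to endpoints.
void propagateSigmoid( double lb, double ub, double &outLb, double &outUb )
{
    outLb = sigmoid( lb ); // sigmoid( -5 ) = 0.0067
    outUb = sigmoid( ub ); // sigmoid( 5 ) = 0.9933
}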
Tightening::LB ), Tightening( 4, 3, Tightening::UB ), Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), Tightening( 8, -1, Tightening::LB ), Tightening( 8, 10, Tightening::UB ), Tightening( 9, -8, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), - + Tightening( 11, -10, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); // Change the current bounds @@ -6960,28 +6963,28 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 3, -8, Tightening::LB ), Tightening( 3, 9, Tightening::UB ), Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), Tightening( 5, -6, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 9, Tightening::UB ), Tightening( 7, -5, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), Tightening( 8, -5, Tightening::LB ), Tightening( 8, 16, Tightening::UB ), Tightening( 9, -14, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - + Tightening( 10, -5, Tightening::LB ), Tightening( 10, 16, Tightening::UB ), - + Tightening( 11, -16, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - + void test_interval_arithmetic_bound_propagation_softmax_constraints() { NLR::NetworkLevelReasoner nlr; populateNetworkWithSoftmax( nlr ); - - + + MockTableau tableau; tableau.getBoundManager().initialize( 14 ); @@ -7029,14 +7032,14 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - + Tightening( 3, 0.0066, Tightening::LB ), Tightening( 3, 0.9517, Tightening::UB ), Tightening( 5, 0.0007, Tightening::LB ), Tightening( 5, 0.9909, Tightening::UB ), Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ), Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ), Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ), - + Tightening( 9, 0.0240, Tightening::LB ), Tightening( 9, 0.8349, Tightening::UB ), Tightening( 11, 0.1651, Tightening::LB ), Tightening( 11, 0.9759, Tightening::UB ), @@ -7089,14 +7092,14 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - + Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), Tightening( 10, 0.0654, Tightening::LB ), Tightening( 10, 2.9933, Tightening::UB ), - + Tightening( 9, 0.0184, Tightening::LB ), Tightening( 9, 0.8678, 
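The max intervals above combine sources elementwise: the output lower bound is the largest source lower bound, and likewise for the upper bound, so x8 in [-1, 10] and x9 in [-8, 3] give x10 in [-1, 10]. A sketch of that rule:

#include <algorithm>

// Interval propagation through max over k sources.
void propagateMax( const double *lb,
                   const double *ub,
                   unsigned k,
                   double &outLb,
                   double &outUb )
{
    outLb = lb[0];
    outUb = ub[0];
    for ( unsigned i = 1; i < k; ++i )
    {
        outLb = std::max( outLb, lb[i] );
        outUb = std::max( outUb, ub[i] );
    }
}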
Tightening::UB ), Tightening( 11, 0.1322, Tightening::LB ), Tightening( 11, 0.9816, Tightening::UB ), @@ -7107,13 +7110,13 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - + void test_interval_arithmetic_bound_propagation_bilinear_constraints() { NLR::NetworkLevelReasoner nlr; populateNetworkWithBilinear( nlr ); - - + + MockTableau tableau; tableau.getBoundManager().initialize( 12 ); @@ -7158,21 +7161,21 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 3, -0.5, Tightening::LB ), Tightening( 3, 0.5, Tightening::UB ), Tightening( 4, -0.3, Tightening::LB ), Tightening( 4, 0.3, Tightening::UB ), Tightening( 5, -0.3, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ), - + Tightening( 6, -0.55, Tightening::LB ), Tightening( 6, 0.55, Tightening::UB ), Tightening( 7, -0.09, Tightening::LB ), Tightening( 7, 0.09, Tightening::UB ), Tightening( 8, 1.36, Tightening::LB ), Tightening( 8, 2.64, Tightening::UB ), Tightening( 9, -0.64, Tightening::LB ), Tightening( 9, 0.64, Tightening::UB ), - + Tightening( 10, -1.6896, Tightening::LB ), Tightening( 10, 1.6896, Tightening::UB ), - + Tightening( 11, -1.6896, Tightening::LB ), Tightening( 11, 1.6896, Tightening::UB ), } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); // Change the current bounds @@ -7213,22 +7216,22 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 3, -0.8, Tightening::LB ), Tightening( 3, 0.9, Tightening::UB ), Tightening( 4, -0.5, Tightening::LB ), Tightening( 4, 0.5, Tightening::UB ), Tightening( 5, -0.6, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ), - + Tightening( 6, -0.88, Tightening::LB ), Tightening( 6, 0.99, Tightening::UB ), Tightening( 7, -0.3, Tightening::LB ), Tightening( 7, 0.3, Tightening::UB ), Tightening( 8, 0.82, Tightening::LB ), Tightening( 8, 3.29, Tightening::UB ), Tightening( 9, -1.29, Tightening::LB ), Tightening( 9, 1.18, Tightening::UB ), - + Tightening( 10, -4.2441, Tightening::LB ), Tightening( 10, 3.8822, Tightening::UB ), - + Tightening( 11, -3.8822, Tightening::LB ), Tightening( 11, 4.2441, Tightening::UB ), } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - + void test_interval_arithmetic_bound_propagation_abs_and_relu_constraints() { NLR::NetworkLevelReasoner nlr; @@ -7357,16 +7360,16 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), } ); - + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - + void test_interval_arithmetic_bound_propagation_round_and_sign_constraints() { NLR::NetworkLevelReasoner nlr; populateNetworkWithRoundAndSign( nlr ); - + MockTableau tableau; tableau.getBoundManager().initialize( 14 ); @@ -7414,14 +7417,14 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), - + Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB 
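For the bilinear intervals above, the extremes of x * y over a box are attained at the four corner products: x8 in [1.36, 2.64] times x9 in [-0.64, 0.64] gives exactly [-1.6896, 1.6896] for x10. A sketch:

#include <algorithm>

// Interval propagation through z = x * y: scan the corner products.
void propagateBilinear( double xl, double xu,
                        double yl, double yu,
                        double &outLb, double &outUb )
{
    double corners[4] = { xl * yl, xl * yu, xu * yl, xu * yu };
    outLb = *std::min_element( corners, corners + 4 );
    outUb = *std::max_element( corners, corners + 4 );
}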
), Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), - + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), @@ -7431,7 +7434,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); // Change the current bounds @@ -7475,14 +7478,14 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 2, -2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), - + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), Tightening( 8, -16, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), @@ -7493,12 +7496,12 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - + void test_interval_arithmetic_bound_propagation_leaky_relu_and_sigmoid_constraints() { NLR::NetworkLevelReasoner nlr; populateNetworkWithLeakyReluAndSigmoid( nlr ); - + MockTableau tableau; tableau.getBoundManager().initialize( 14 ); @@ -7546,14 +7549,14 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ), Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), - + Tightening( 9, 0.0573, Tightening::LB ), Tightening( 9, 0.9999, Tightening::UB ), Tightening( 11, 0.0219, Tightening::LB ), Tightening( 11, 0.9999, Tightening::UB ), @@ -7563,7 +7566,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); // Change the current bounds @@ -7614,7 +7617,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 7.1, Tightening::UB ), Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), - + Tightening( 9, 0.0323, Tightening::LB ), Tightening( 9, 0.9992, Tightening::UB ), Tightening( 11, 0.0392, Tightening::LB ), Tightening( 11, 0.9993, Tightening::UB ), @@ -7625,13 +7628,13 @@ class 
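The round and sign expectations above come from two simple rules: Round maps each endpoint through round-to-nearest with ties to even (so -3.5 becomes -4, matching the x5 lower bound), while Sign collapses to a constant only when the input interval lies strictly on one side of zero. A sketch; the tie-breaking and sign-at-zero conventions here are assumptions to be checked against the respective Layer implementations:

#include <cmath>

void propagateRound( double lb, double ub, double &outLb, double &outUb )
{
    outLb = std::rint( lb ); // rint( -3.5 ) = -4 under ties-to-even
    outUb = std::rint( ub ); // rint( 7.4 ) = 7
}

void propagateSign( double lb, double ub, double &outLb, double &outUb )
{
    outLb = ( lb >= 0 ) ? 1 : -1; // -1 while negative inputs are possible
    outUb = ( ub >= 0 ) ? 1 : -1; // 1 once non-negative inputs are possible
}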
NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - + void test_interval_arithmetic_bound_propagation_softmax_and_max_constraints() { NLR::NetworkLevelReasoner nlr; populateNetworkWithSoftmaxAndMax( nlr ); - - + + MockTableau tableau; tableau.getBoundManager().initialize( 12 ); @@ -7671,26 +7674,34 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Perform the tightening pass TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - - Tightening( 3, 0.0066, Tightening::LB ), Tightening( 3, 0.9517, Tightening::UB ), - Tightening( 5, 0.0007, Tightening::LB ), Tightening( 5, 0.9909, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ), - - Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ), - Tightening( 9, 0.3192, Tightening::LB ), Tightening( 9, 2.9819, Tightening::UB ), - - Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ), - - Tightening( 11, -2.9819, Tightening::LB ), Tightening( 11, -0.3192, Tightening::UB ) - } ); + List expectedBounds( { Tightening( 2, 0, Tightening::LB ), + Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), + Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), + Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 0.0066, Tightening::LB ), + Tightening( 3, 0.9517, Tightening::UB ), + Tightening( 5, 0.0007, Tightening::LB ), + Tightening( 5, 0.9909, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), + Tightening( 7, 0.7297, Tightening::UB ), + + Tightening( 8, -0.7225, Tightening::LB ), + Tightening( 8, 1.9403, Tightening::UB ), + Tightening( 9, 0.3192, Tightening::LB ), + Tightening( 9, 2.9819, Tightening::UB ), + + Tightening( 10, 0.3192, Tightening::LB ), + Tightening( 10, 2.9819, Tightening::UB ), + + Tightening( 11, -2.9819, Tightening::LB ), + Tightening( 11, -0.3192, Tightening::UB ) } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); // Change the current bounds @@ -7726,28 +7737,36 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Perform the tightening pass TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + List expectedBounds2( { Tightening( 2, -2, Tightening::LB ), + Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), + Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), + Tightening( 6, 2, Tightening::UB ), - Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), - Tightening( 9, 0.0654, 
Tightening::LB ), Tightening( 9, 2.9933, Tightening::UB ), - - Tightening( 10, 0.0654, Tightening::LB ), Tightening( 10, 2.9933, Tightening::UB ), - - Tightening( 11, -2.9933, Tightening::LB ), Tightening( 11, -0.0654, Tightening::UB ) - } ); + Tightening( 3, 0.0009, Tightening::LB ), + Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), + Tightening( 7, 0.9820, Tightening::UB ), + + Tightening( 8, -0.9811, Tightening::LB ), + Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 9, 0.0654, Tightening::LB ), + Tightening( 9, 2.9933, Tightening::UB ), + + Tightening( 10, 0.0654, Tightening::LB ), + Tightening( 10, 2.9933, Tightening::UB ), + + Tightening( 11, -2.9933, Tightening::LB ), + Tightening( 11, -0.0654, Tightening::UB ) } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - + void test_interval_arithmetic_bound_propagation_relu_and_bilinear_constraints() { NLR::NetworkLevelReasoner nlr; @@ -7796,7 +7815,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), @@ -7805,7 +7824,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 9, -1, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), Tightening( 10, -7, Tightening::LB ), Tightening( 10, 49, Tightening::UB ), - + Tightening( 11, -49, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), } ); @@ -7848,27 +7867,27 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), Tightening( 9, -2, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), Tightening( 10, -14, Tightening::LB ), Tightening( 10, 49, Tightening::UB ), - - Tightening( 11, -49, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + + Tightening( 11, -49, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( 
bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - + void test_sbt_relus_all_active() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); @@ -8155,7 +8174,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite void test_sbt_relu_residual1() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -8180,20 +8199,20 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ReLU is undecided, bound is concretized. Coefficient: 1/( 1--1 ) = 1/2 = 0.5 - + x2.lb = 0.5x0 - x2.ub = 0.5x0 + 0.5 + x2.ub = 0.5x0 + 0.5 x2 range: [0, 1] - + Layer 2 (with residual from x0): x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1] x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5] x3 range: [-1, 2.5] - + ReLU is undecided, bound is concretized. Coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7. - + x4.lb = 0 x4.ub = 5/7 ( -1.5x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5] x4 range: [0, 2.5] @@ -8216,18 +8235,18 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2.5, Tightening::UB ), Tightening( 5, 2, Tightening::LB ), - Tightening( 5, 5.5, Tightening::UB ), + Tightening( 5, 5.5, Tightening::UB ), } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - + void test_sbt_relu_residual2() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -8252,20 +8271,20 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ReLU is undecided, bound is concretized. Coefficient: 1/( 1--1 ) = 1/2 = 0.5 - + x2.lb = 0.5x0 - x2.ub = 0.5x0 + 0.5 + x2.ub = 0.5x0 + 0.5 x2 range: [0, 1] - + Layer 2 (with residual from x0): x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1] x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5] x3 range: [-1, 2.5] - + ReLU is undecided, bound is concretized. Coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7. - + x4.lb = 0 x4.ub = 5/7 ( -1.5x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5] x4 range: [0, 2.5] @@ -8273,9 +8292,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Layer 3 (with residual from x0): x5.lb = 3 ( 0 ) + 1 ( x0 ) + 1 = 1x0 + 1 : [0, 2] - x5.ub = 3 ( -15/14 x0 + 20/14 ) + 1 ( x0 ) + 1 = -31/14 x0 + 74/14 : [43/14, 105/14 = 7.5] - x5 range: [0, 7.5] - + x5.ub = 3 ( -15/14 x0 + 20/14 ) + 1 ( x0 ) + 1 = -31/14 x0 + 74/14 : [43/14, 105/14 + = 7.5] x5 range: [0, 7.5] + Layer 4: x6.lb = 1x0 + 1 : [0, 2] x6.ub = -31/14 x0 + 74/14 : [43/14, 105/14 = 7.5] @@ -8301,11 +8320,11 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - + void test_sbt_relu_reindex() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -8330,41 +8349,41 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x2.lb = x0 + x1 : [-2, 2] x2.ub = x0 + x1 : [-2, 2] - + x3.lb = x0 - x1 : [-2, 2] x3.ub = x0 - x1 : [-2, 2] Both ReLUs are undecided, bounds are concretized. 
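The "coefficient" in the two residual derivations above is the standard relaxation for an undecided ReLU with pre-activation bounds [l, u], l < 0 < u: the upper bound is the chord of slope u / ( u - l ) through ( l, 0 ), and in these derivations the lower bound reuses the same slope with no offset. Other variants, such as the zero lower bound used for the second ReLU in the reindex test below, are equally sound. A sketch of just the coefficient computation:

// Relaxation for an undecided ReLU on [l, u] with l < 0 < u.
struct ReluRelaxation
{
    double lowerSlope;  // scales the symbolic lower bound
    double upperSlope;  // scales the symbolic upper bound
    double upperOffset; // constant added to the symbolic upper bound
};

ReluRelaxation relaxUndecidedRelu( double l, double u )
{
    double lambda = u / ( u - l ); // e.g. 1 / ( 1 - -1 ) = 0.5
    return ReluRelaxation{ lambda, lambda, -lambda * l };
}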
Coefficient: 2/( 2--2 ) = 2/4 = 0.5 - + x4.lb = 0.5 ( x0 + x1 ) = 0.5x0 + 0.5x1 x4.ub = 0.5 ( x0 + x1 ) + 1 = 0.5x0 + 0.5x1 + 1 x4 range: [0, 2] - + x5.lb = 0.5 ( x0 - x1 ) = 0.5x0 - 0.5x1 x5.ub = 0.5 ( x0 - x1 ) + 1 = 0.5x0 - 0.5x1 + 1 x5 range: [0, 2] - + Layer 2: x6.lb = 1 ( 0.5x0 + 0.5x1 ) + 1 ( 0.5x0 - 0.5x1 ) = x0 : [-1, 1] x6.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) + 1 ( 0.5x0 - 0.5x1 + 1 ) = x0 + 2 : [1, 3] x6 range: [-1, 3] - + x7.lb = 1 ( 0.5x0 + 0.5x1 ) - 1 ( 0.5x0 - 0.5x1 + 1 ) = x1 - 1 : [-2, 0] x7.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) - 1 ( 0.5x0 - 0.5x1 ) = x1 + 1 : [0, 2] x7 range: [-2, 2] - + Both ReLUs are undecided, bounds are concretized. Coefficient (first ReLU, lower): 1/( 1--1 ) = 1/2 = 0.5 Coefficient (first ReLU, upper): 1 (propagated as is) Coefficient (second ReLU, lower): 0 (bound is zero) Coefficient (second ReLU, upper): 2/( 2--2 ) = 2/4 = 0.5 - + x8.lb = 0.5 ( x0 ) = 0.5x0 x8.ub = x0 + 2 x8 range: [0, 3] - + x9.lb = 0 x9.ub = 0.5 ( x1 + 1 ) + 1 = 0.5x1 + 1.5 x9 range: [0, 2] @@ -8374,11 +8393,11 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x10.lb = 1 ( 0.5x0 ) + 1 ( 0 ) + 1 = 0.5x0 + 1 : [0.5, 1.5] x10.ub = 1 ( x0 + 2 ) + 1 ( 0.5x1 + 1.5 ) + 1 = x0 + 0.5x1 + 4.5 : [3, 6] x10 range: [0.5, 6] - + x11.lb = 0.5x1 - 0.5 x11.ub = 0.5x1 + 1.5 x11 range: [0, 2] - + */ List expectedBounds( @@ -8388,7 +8407,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), @@ -8403,7 +8422,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - + void test_sbt_abs_all_positive() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); @@ -8635,7 +8654,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - + void test_sbt_absolute_values_positive_and_not_fixed() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); @@ -8874,7 +8893,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - + void test_sbt_signs_positive_and_not_fixed() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); @@ -9088,7 +9107,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Second sign is positive, bounds become constant 1 x4: all set to -1 - + x5: all set to 1 Layer 2: @@ -9116,7 +9135,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - + void test_sbt_leaky_relu() { NLR::NetworkLevelReasoner nlr; @@ -9143,43 +9162,43 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x2.lb = x0 + x1 : [-2, 2] x2.ub = x0 + x1 : [-2, 2] - + x3.lb = x0 - x1 : [-2, 2] x3.ub = x0 - x1 : [-2, 2] Both LeakyReLUs are undecided, 
bounds are concretized. Coefficient: ( 2 - 0.2*-2 )/( 2--2 ) = 2.4/4 = 0.6 Bias: ( 0.2 - 1 ) * 2 * -2 / ( 2--2 ) = 0.8 - + x4.lb = x0 + x1 x4.ub = 0.6 ( x0 + x1 ) + 0.8 = 0.6x0 + 0.6x1 + 0.8 x4 range: [-0.4, 2] - + x5.lb = x0 - x1 x5.ub = 0.6 ( x0 - x1 ) + 0.8 = 0.6x0 - 0.6x1 + 0.8 x5 range: [-0.4, 2] - + Layer 2: x6.lb = 1 ( x0 + x1 ) + 1 ( x0 - x1 ) = 2x0 : [-2, 2] x6.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) + 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 1.2x0 + 1.6 : [0.4, 2.8] x6 range: [-2, 2.8] - + x7.lb = 1 ( x0 + x1 ) - 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2] x7.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) - 1 ( x0 - x1 ) = -0.4x0 + 1.6x1 + 0.8 : [-1.2, 2.8] x7 range: [-2.8, 2.8] - + Both LeakyReLUs are undecided, bounds are concretized. Coefficient (first LeakyReLU): ( 2.8 - 0.2*-2 )/( 2.8--2 ) = 3.2/4.8 = 10/15 Bias (first LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2 / ( 2.8--2 ) = 14/15 - + Coefficient (second LeakyReLU): ( 2.8 - 0.2*-2.8 )/( 2.8--2.8 ) = 3.36/5.6 = 0.6 Bias (second LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2.8 / ( 2.8--2.8 ) = 1.12 - + x8.lb = 2x0 x8.ub = 10/15 ( 1.2x0 + 1.6 ) + 14/15 = 0.8x0 + 2 x8 range: [-0.4, 2.8] - + x9.lb = 0.4x0 + 1.6x1 - 0.8 x9.ub = 0.6 ( -0.4x0 + 1.6x1 + 0.8 ) + 1.12 = -0.24 x0 + 0.96 x1 + 1.6 x9 range: [-0.56, 2.8] @@ -9187,30 +9206,30 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Layer 3: x10.lb = 1 ( 0.4x0 + 1.6x1 - 0.8 ) + 1 ( 2x0 ) + 1 = 2.4x0 + 1.6x1 + 0.2 : [-3.8, 5.2] - x10.ub = 1 ( -0.24 x0 + 0.96 x1 + 1.6 ) + 1 ( 0.8x0 + 2 ) + 1 = 0.56x0 + 0.96x1 + 4.6 : [3.08, 6.12] - x10 range: [-3.8, 6.12] - + x10.ub = 1 ( -0.24 x0 + 0.96 x1 + 1.6 ) + 1 ( 0.8x0 + 2 ) + 1 = 0.56x0 + 0.96x1 + 4.6 : + [3.08, 6.12] x10 range: [-3.8, 6.12] + x11.lb = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2] x11.ub = 0.6 ( -0.4x0 + 1.6x1 + 0.8 ) + 1.12 = -0.24 x0 + 0.96 x1 + 1.6 : [0.4, 2.8] x11 range: [-2.8, 2.8] - + */ List expectedBounds( - { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -0.4, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, -0.4, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 4, -0.4, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -0.4, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2.8, Tightening::UB ), - Tightening( 7, -2.8, Tightening::LB ), Tightening( 7, 2.8, Tightening::UB ), + Tightening( 7, -2.8, Tightening::LB ), Tightening( 7, 2.8, Tightening::UB ), - Tightening( 8, -0.4, Tightening::LB ), Tightening( 8, 2.8, Tightening::UB ), - Tightening( 9, -0.56, Tightening::LB ), Tightening( 9, 2.8, Tightening::UB ), + Tightening( 8, -0.4, Tightening::LB ), Tightening( 8, 2.8, Tightening::UB ), + Tightening( 9, -0.56, Tightening::LB ), Tightening( 9, 2.8, Tightening::UB ), Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 6.12, Tightening::UB ), - Tightening( 11, -2.8, Tightening::LB ), Tightening( 11, 2.8, Tightening::UB ) + Tightening( 11, -2.8, Tightening::LB ), Tightening( 11, 2.8, Tightening::UB ) } ); @@ -9222,7 +9241,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite void test_sbt_sigmoids_and_round() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - + NLR::NetworkLevelReasoner nlr; MockTableau tableau; 
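The coefficient and bias in the LeakyReLU derivation above are the tightest upper line through the two kink endpoints ( l, alpha * l ) and ( u, u ): slope ( u - alpha * l ) / ( u - l ) and intercept ( alpha - 1 ) * u * l / ( u - l ). For alpha = 0.2, l = -2, u = 2 this reproduces the 0.6 and 0.8 computed above. A sketch:

// Upper-line relaxation for an undecided LeakyReLU on [l, u], l < 0 < u.
void relaxUndecidedLeakyRelu( double alpha,
                              double l,
                              double u,
                              double &slope,
                              double &intercept )
{
    slope = ( u - alpha * l ) / ( u - l );         // ( 2 + 0.4 ) / 4 = 0.6
    intercept = ( alpha - 1 ) * u * l / ( u - l ); // -0.8 * -4 / 4 = 0.8
}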
nlr.setTableau( &tableau ); @@ -9303,11 +9322,11 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 1 ), -1 ); TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 1 ), 1 ); } - + void test_sbt_max_not_fixed() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -9343,14 +9362,14 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x4.lb = 0.6 ( x0 + x1 ) = 0.6x0 + 0.6x1 x4.ub = 0.6 ( x0 + x1 ) + 1.2 = 0.6x0 + 0.6x1 + 1.2 x4 range: [0, 3] - + x5.lb = 0.4 ( x0 - x1 ) = 0.4x0 + 0.4x1 x5.ub = 0.4 ( x0 - x1 ) + 1.2 = 0.4x0 + 0.4x1 + 1.2 x5 range: [0, 2] - + Max is not fixed because x5.lb <= x4.ub and x4.lb <= x5.ub Max inherits lower bound from x4, and its upper bound is constant 3. - + x6.lb = 0.6x0 + 0.6x1 : [-1.2, 1.8] x6.ub = 3 : [3, 3] x6 range: [-1.2, 3] @@ -9386,7 +9405,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite void test_sbt_max_fixed() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -9419,12 +9438,12 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Second ReLU is positive, bounds survive the activation x4: all set to 0 - + x5.lb = x0 - x1 : [3, 5] x5.ub = x0 - x1 : [3, 5] - + Max is fixed because x5.lb > x4.ub, it inherits x5's bounds - + x6.lb = x0 - x1 : [3, 5] x6.ub = x0 - x1 : [3, 5] @@ -9458,7 +9477,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite void test_sbt_softmax1() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -9479,7 +9498,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite void test_sbt_softmax2() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - + { Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); NLR::NetworkLevelReasoner nlr; @@ -9581,7 +9600,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite void test_sbt_softmax3() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -9606,36 +9625,23 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x2: [1, 1.0001] */ - List expectedBounds({ Tightening( 3, 2, Tightening::LB ), - Tightening( 3, 2, Tightening::UB ), - Tightening( 4, 3, Tightening::LB ), - Tightening( 4, 3, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), - Tightening( 5, 0, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), - Tightening( 6, -1, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), - Tightening( 7, -2, Tightening::UB ), - Tightening( 8, 0.8668, Tightening::LB ), - Tightening( 8, 0.8668, Tightening::UB ), - Tightening( 9, 0.9820, Tightening::LB ), - Tightening( 9, 0.9820, Tightening::UB ), - Tightening( 10, 0.1173, Tightening::LB ), - Tightening( 10, 0.1173, Tightening::UB ), - Tightening( 11, 0.0179, Tightening::LB ), - Tightening( 11, 0.0179, Tightening::UB ), - Tightening( 12, 0.0159, Tightening::LB ), - Tightening( 12, 0.0159, Tightening::UB ), - Tightening( 13, 0.9470, Tightening::LB ), - Tightening( 13, 0.9470, Tightening::UB ), - Tightening( 14, -0.9470, Tightening::LB ), - Tightening( 14, -0.9470, Tightening::UB ), - Tightening( 15, 1.0253, 
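The fixed/not-fixed distinction in the two max derivations above reduces to a bound comparison: if some source's lower bound is at least every other source's upper bound, the max is fixed and inherits that source's symbolic bounds; otherwise the upper bound must be concretized. A sketch of that check, again as a freestanding helper rather than the NLR's own code:

// Returns true iff one source provably dominates all others.
bool maxIsFixed( const double *lb,
                 const double *ub,
                 unsigned k,
                 unsigned &winner )
{
    for ( unsigned i = 0; i < k; ++i )
    {
        bool dominates = true;
        for ( unsigned j = 0; j < k; ++j )
        {
            if ( j != i && ub[j] > lb[i] )
                dominates = false;
        }
        if ( dominates )
        {
            winner = i;
            return true;
        }
    }
    return false;
}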
Tightening::LB ), - Tightening( 15, 1.0253, Tightening::UB ), - Tightening( 16, -1.0253, Tightening::LB ), - Tightening( 16, -1.0253, Tightening::UB ) + List expectedBounds( + { Tightening( 3, 2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, -1, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, -2, Tightening::UB ), + Tightening( 8, 0.8668, Tightening::LB ), Tightening( 8, 0.8668, Tightening::UB ), + Tightening( 9, 0.9820, Tightening::LB ), Tightening( 9, 0.9820, Tightening::UB ), + Tightening( 10, 0.1173, Tightening::LB ), Tightening( 10, 0.1173, Tightening::UB ), + Tightening( 11, 0.0179, Tightening::LB ), Tightening( 11, 0.0179, Tightening::UB ), + Tightening( 12, 0.0159, Tightening::LB ), Tightening( 12, 0.0159, Tightening::UB ), + Tightening( 13, 0.9470, Tightening::LB ), Tightening( 13, 0.9470, Tightening::UB ), + Tightening( 14, -0.9470, Tightening::LB ), Tightening( 14, -0.9470, Tightening::UB ), + Tightening( 15, 1.0253, Tightening::LB ), Tightening( 15, 1.0253, Tightening::UB ), + Tightening( 16, -1.0253, Tightening::LB ), Tightening( 16, -1.0253, Tightening::UB ) - } ); + } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); @@ -9688,7 +9694,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite value = NLR::DeepPolySoftmaxElement::dLSEUpperbound( input, outputLb, outputUb, 0, 1 ); TS_ASSERT( FloatUtils::areEqual( value, -0.073207, 0.00001 ) ); } - + void test_sbt_bilinear() { NLR::NetworkLevelReasoner nlr; @@ -9710,7 +9716,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x0: [1, 2] x1: [-2, 1] - + Layer 1: x2.lb = x0 - 2x1 : [-1, 6] @@ -9724,7 +9730,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite alpha_l = x3.lb = -1 beta = x2.lb = -1 gamma_l = -x2.lb * x3.lb = --1 * -1 = -1 - + Upper bound: alpha_u = x3.ub = 3 beta = x2.lb = -1 @@ -9878,12 +9884,13 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite for ( const auto &bound : expectedBounds ) TS_ASSERT( bounds.exists( bound ) ); } - + void test_backwards_relu() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -9897,42 +9904,42 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( - { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), 
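The alpha/beta/gamma terms in the bilinear derivation above are two of the four McCormick inequalities for z = x * y on [xl, xu] x [yl, yu]:

    z >= yl * x + xl * y - xl * yl
    z <= yu * x + xl * y - xl * yu

which for xl = -1, yl = -1, yu = 3 gives exactly the ( -1, -1, -1 ) and ( 3, -1, 3 ) coefficient triples computed above. A sketch:

struct LinearTerm
{
    double alpha; // coefficient of x
    double beta;  // coefficient of y
    double gamma; // constant term
};

// The two McCormick bounds anchored at xl (xu is unused by this pair).
void mcCormick( double xl, double yl, double yu,
                LinearTerm &lower, LinearTerm &upper )
{
    lower = LinearTerm{ yl, xl, -xl * yl };
    upper = LinearTerm{ yu, xl, -xl * yu };
}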
Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ), + Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ), + } ); - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ), - Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ), - } ); - List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( - { Tightening( 8, 0, Tightening::LB ), - } ); - + + List expectedBounds2( { + Tightening( 8, 0, Tightening::LB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - + + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -9965,47 +9972,48 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ), + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ), + + Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + } ); - Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), - } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - 
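The test_backwards_* cases that begin here all follow the same two-phase loop, sketched below: DeepPoly runs first, its tightenings are written back into the mock tableau (updateTableau is assumed to copy each Tightening into the tableau's bound manager), and only then does the backward LP relaxation run, so the second expected-bounds list contains just the additional tightenings it discovers.

List<Tightening> bounds;

// Phase 1: DeepPoly
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );

// Phase 2: feed the results back, then run backward LP propagation
TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );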
- + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( - { Tightening( 8, 0, Tightening::LB ), - Tightening( 9, 0, Tightening::LB ), - } ); - + + List expectedBounds4( { + Tightening( 8, 0, Tightening::LB ), + Tightening( 9, 0, Tightening::LB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - + void test_backwards_relu2() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -10023,56 +10031,56 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - List expectedBounds( - { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), - Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ), - - Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ), - Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), - Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ), - - Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ), - Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ), - - Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ), - Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ), - - Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ), - } ); - + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ), + + Tightening( 14, -4, 
Tightening::LB ), Tightening( 14, 8, Tightening::UB ), + Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ), + + Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ), + + Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ), + + Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ), + } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( - { Tightening( 14, 0, Tightening::LB ), - Tightening( 15, 0, Tightening::LB ), - Tightening( 16, 0, Tightening::LB ), - - Tightening( 19, 0, Tightening::LB ), - } ); - + + List expectedBounds2( { + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + + Tightening( 19, 0, Tightening::LB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -10125,63 +10133,64 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), - Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), - Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ), - - Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ), - Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ), - Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ), - - Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ), - Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ), - - Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ), - Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ), - - Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ), - } ); - + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, 
Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ), + Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ), + Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ), + + Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ), + Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ), + Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ), + + Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( - { Tightening( 7, 0, Tightening::LB ), - - Tightening( 14, 0, Tightening::LB ), - Tightening( 15, 0, Tightening::LB ), - Tightening( 16, 0, Tightening::LB ), - - Tightening( 20, 0, Tightening::LB ), - } ); - + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + + Tightening( 20, 0, Tightening::LB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - + void test_backwards_sigmoid() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -10196,42 +10205,40 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( - { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, 0.2689, Tightening::LB ), Tightening( 4, 0.7311, Tightening::UB ), - Tightening( 5, 0.5, Tightening::LB ), Tightening( 5, 0.8808, Tightening::UB ), + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 6, -0.1261, Tightening::LB ), Tightening( 6, 0.5069, Tightening::UB ), 
- Tightening( 7, -0.2379, Tightening::LB ), Tightening( 7, 0.8571, Tightening::UB ), + Tightening( 4, 0.2689, Tightening::LB ), Tightening( 4, 0.7311, Tightening::UB ), + Tightening( 5, 0.5, Tightening::LB ), Tightening( 5, 0.8808, Tightening::UB ), - Tightening( 8, 0.4685, Tightening::LB ), Tightening( 8, 0.6241, Tightening::UB ), - Tightening( 9, 0.4408, Tightening::LB ), Tightening( 9, 0.7021, Tightening::UB ), + Tightening( 6, -0.1261, Tightening::LB ), Tightening( 6, 0.5069, Tightening::UB ), + Tightening( 7, -0.2379, Tightening::LB ), Tightening( 7, 0.8571, Tightening::UB ), + + Tightening( 8, 0.4685, Tightening::LB ), Tightening( 8, 0.6241, Tightening::UB ), + Tightening( 9, 0.4408, Tightening::LB ), Tightening( 9, 0.7021, Tightening::UB ), + + Tightening( 10, -1.1819, Tightening::LB ), Tightening( 10, -1.0535, Tightening::UB ), + Tightening( 11, 3.4986, Tightening::LB ), Tightening( 11, 3.8797, Tightening::UB ), + } ); - Tightening( 10, -1.1819, Tightening::LB ), Tightening( 10, -1.0535, Tightening::UB ), - Tightening( 11, 3.4986, Tightening::LB ), Tightening( 11, 3.8797, Tightening::UB ), - } ); - List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( - { - } ); - + + List expectedBounds2( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - + + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -10264,46 +10271,45 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 4, 0.0066, Tightening::LB ), Tightening( 4, 0.8807, Tightening::UB ), - Tightening( 5, 0.0179, Tightening::LB ), Tightening( 5, 0.9526, Tightening::UB ), + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 6, -0.8362, Tightening::LB ), Tightening( 6, 0.9193, Tightening::UB ), - Tightening( 7, -0.8860, Tightening::LB ), Tightening( 7, 1.6904, Tightening::UB ), + Tightening( 4, 0.0066, Tightening::LB ), Tightening( 4, 0.8807, Tightening::UB ), + Tightening( 5, 0.0179, Tightening::LB ), Tightening( 5, 0.9526, Tightening::UB ), - Tightening( 8, 0.3023, Tightening::LB ), Tightening( 8, 0.7148, Tightening::UB ), - Tightening( 9, 0.2919, Tightening::LB ), Tightening( 9, 0.8443, Tightening::UB ), + Tightening( 6, -0.8362, Tightening::LB ), Tightening( 6, 0.9193, Tightening::UB ), + Tightening( 7, -0.8860, Tightening::LB ), Tightening( 7, 1.6904, Tightening::UB ), + + Tightening( 8, 0.3023, Tightening::LB ), Tightening( 8, 0.7148, Tightening::UB ), + Tightening( 9, 0.2919, Tightening::LB ), Tightening( 9, 0.8443, Tightening::UB ), + + Tightening( 10, -1.2694, Tightening::LB ), Tightening( 10, -0.8841, Tightening::UB ), + Tightening( 11, 3.2396, Tightening::LB ), Tightening( 11, 4.05, Tightening::UB ), + } ); - Tightening( 10, 
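The sigmoid tightenings in these backwards tests rest on the usual tangent/secant relaxation: over [l, u] the sigmoid is sandwiched between parallel lines whose slope is either the secant slope or the smaller endpoint derivative, anchored at the endpoints. A sketch of the slope computation only; the exact variant used lives in the DeepPoly sigmoid element:

#include <algorithm>
#include <cmath>

inline double sigmoid( double x )
{
    return 1.0 / ( 1.0 + std::exp( -x ) );
}

inline double sigmoidDerivative( double x )
{
    double s = sigmoid( x );
    return s * ( 1 - s );
}

// Candidate slopes for linear sigmoid bounds over [l, u].
void sigmoidSlopes( double l, double u, double &secant, double &tangent )
{
    secant = ( sigmoid( u ) - sigmoid( l ) ) / ( u - l );
    tangent = std::min( sigmoidDerivative( l ), sigmoidDerivative( u ) );
}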
-1.2694, Tightening::LB ), Tightening( 10, -0.8841, Tightening::UB ), - Tightening( 11, 3.2396, Tightening::LB ), Tightening( 11, 4.05, Tightening::UB ), - } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( - { - } ); - + + List expectedBounds4( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - + void test_backwards_sigmoid2() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -10320,53 +10326,51 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( - { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), - Tightening( 8, 0.0474, Tightening::LB ), Tightening( 8, 0.9526, Tightening::UB ), - Tightening( 9, 0.0474, Tightening::LB ), Tightening( 9, 0.9526, Tightening::UB ), - Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9975, Tightening::UB ), - - Tightening( 11, 1.3787, Tightening::LB ), Tightening( 11, 4.6213, Tightening::UB ), - Tightening( 12, -0.5636, Tightening::LB ), Tightening( 12, 2.5636, Tightening::UB ), - Tightening( 13, -2.3771, Tightening::LB ), Tightening( 13, 0.3771, Tightening::UB ), - - Tightening( 14, 0.7988, Tightening::LB ), Tightening( 14, 0.9903, Tightening::UB ), - Tightening( 15, 0.3627, Tightening::LB ), Tightening( 15, 0.9285, Tightening::UB ), - Tightening( 16, 0.0849, Tightening::LB ), Tightening( 16, 0.5932, Tightening::UB ), - - Tightening( 17, -1.2113, Tightening::LB ), Tightening( 17, 0.0354, Tightening::UB ), - Tightening( 18, 1.4027, Tightening::LB ), Tightening( 18, 2.4177, Tightening::UB ), - - Tightening( 19, 0.2295, Tightening::LB ), Tightening( 19, 0.5088, Tightening::UB ), - Tightening( 20, 0.8026, Tightening::LB ), Tightening( 20, 0.9182, Tightening::UB ), - - Tightening( 21, -1.6539, Tightening::LB ), Tightening( 21, -1.3393, Tightening::UB ), - } ); - + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), + Tightening( 8, 0.0474, Tightening::LB ), Tightening( 8, 0.9526, Tightening::UB ), + Tightening( 9, 0.0474, Tightening::LB ), Tightening( 9, 0.9526, Tightening::UB ), + Tightening( 10, 0.0025, 
Tightening::LB ), Tightening( 10, 0.9975, Tightening::UB ), + + Tightening( 11, 1.3787, Tightening::LB ), Tightening( 11, 4.6213, Tightening::UB ), + Tightening( 12, -0.5636, Tightening::LB ), Tightening( 12, 2.5636, Tightening::UB ), + Tightening( 13, -2.3771, Tightening::LB ), Tightening( 13, 0.3771, Tightening::UB ), + + Tightening( 14, 0.7988, Tightening::LB ), Tightening( 14, 0.9903, Tightening::UB ), + Tightening( 15, 0.3627, Tightening::LB ), Tightening( 15, 0.9285, Tightening::UB ), + Tightening( 16, 0.0849, Tightening::LB ), Tightening( 16, 0.5932, Tightening::UB ), + + Tightening( 17, -1.2113, Tightening::LB ), Tightening( 17, 0.0354, Tightening::UB ), + Tightening( 18, 1.4027, Tightening::LB ), Tightening( 18, 2.4177, Tightening::UB ), + + Tightening( 19, 0.2295, Tightening::LB ), Tightening( 19, 0.5088, Tightening::UB ), + Tightening( 20, 0.8026, Tightening::LB ), Tightening( 20, 0.9182, Tightening::UB ), + + Tightening( 21, -1.6539, Tightening::LB ), Tightening( 21, -1.3393, Tightening::UB ), + } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( - { - } ); - + + List expectedBounds2( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - + + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -10419,57 +10423,56 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.9933, Tightening::UB ), - Tightening( 8, 0.0067, Tightening::LB ), Tightening( 8, 0.9933, Tightening::UB ), - Tightening( 9, 0.0025, Tightening::LB ), Tightening( 9, 0.9933, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9991, Tightening::UB ), - - Tightening( 11, 1.2517, Tightening::LB ), Tightening( 11, 4.9701, Tightening::UB ), - Tightening( 12, -0.9599, Tightening::LB ), Tightening( 12, 2.8466, Tightening::UB ), - Tightening( 13, -2.8147, Tightening::LB ), Tightening( 13, 0.9188, Tightening::UB ), - - Tightening( 14, 0.7776, Tightening::LB ), Tightening( 14, 0.9931, Tightening::UB ), - Tightening( 15, 0.2769, Tightening::LB ), Tightening( 15, 0.9451, Tightening::UB ), - Tightening( 16, 0.0565, Tightening::LB ), Tightening( 16, 0.7148, Tightening::UB ), - - Tightening( 17, -1.4307, Tightening::LB ), Tightening( 17, 0.1083, Tightening::UB ), - Tightening( 18, 1.2592, Tightening::LB ), Tightening( 18, 2.5519, Tightening::UB ), - - Tightening( 19, 0.1929, Tightening::LB ), Tightening( 19, 0.5270, Tightening::UB ), - Tightening( 20, 0.7789, Tightening::LB ), Tightening( 20, 0.9277, Tightening::UB ), - - Tightening( 21, -1.6967, Tightening::LB ), Tightening( 21, -1.3115, Tightening::UB ), - } ); - + + List expectedBounds3( 
{ + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.9933, Tightening::UB ), + Tightening( 8, 0.0067, Tightening::LB ), Tightening( 8, 0.9933, Tightening::UB ), + Tightening( 9, 0.0025, Tightening::LB ), Tightening( 9, 0.9933, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9991, Tightening::UB ), + + Tightening( 11, 1.2517, Tightening::LB ), Tightening( 11, 4.9701, Tightening::UB ), + Tightening( 12, -0.9599, Tightening::LB ), Tightening( 12, 2.8466, Tightening::UB ), + Tightening( 13, -2.8147, Tightening::LB ), Tightening( 13, 0.9188, Tightening::UB ), + + Tightening( 14, 0.7776, Tightening::LB ), Tightening( 14, 0.9931, Tightening::UB ), + Tightening( 15, 0.2769, Tightening::LB ), Tightening( 15, 0.9451, Tightening::UB ), + Tightening( 16, 0.0565, Tightening::LB ), Tightening( 16, 0.7148, Tightening::UB ), + + Tightening( 17, -1.4307, Tightening::LB ), Tightening( 17, 0.1083, Tightening::UB ), + Tightening( 18, 1.2592, Tightening::LB ), Tightening( 18, 2.5519, Tightening::UB ), + + Tightening( 19, 0.1929, Tightening::LB ), Tightening( 19, 0.5270, Tightening::UB ), + Tightening( 20, 0.7789, Tightening::LB ), Tightening( 20, 0.9277, Tightening::UB ), + + Tightening( 21, -1.6967, Tightening::LB ), Tightening( 21, -1.3115, Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( - { - } ); - + + List expectedBounds4( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - + void test_backwards_abs() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -10484,42 +10487,40 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( - { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + 
Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, -4, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 2, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + } ); - Tightening( 10, -4, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, 2, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - } ); - List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( - { - } ); - + + List expectedBounds2( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - + + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -10552,46 +10553,45 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 4, Tightening::UB ), + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 6, -5, Tightening::LB ), Tightening( 6, 4, Tightening::UB ), - Tightening( 7, -4, Tightening::LB ), Tightening( 7, 10, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 4, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 10, Tightening::UB ), + Tightening( 6, -5, Tightening::LB ), Tightening( 6, 4, Tightening::UB ), + Tightening( 7, -4, Tightening::LB ), Tightening( 7, 10, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 10, Tightening::UB ), + + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 2, Tightening::LB ), Tightening( 11, 27, Tightening::UB ), + } ); - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, 2, Tightening::LB ), Tightening( 11, 27, Tightening::UB ), - } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - 
List expectedBounds4( - { - } ); - + + List expectedBounds4( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - + void test_backwards_abs2() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -10608,53 +10608,51 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( - { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -4, Tightening::LB ), Tightening( 11, 9, Tightening::UB ), - Tightening( 12, -2, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), - Tightening( 13, -5, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), - - Tightening( 14, 0, Tightening::LB ), Tightening( 14, 9, Tightening::UB ), - Tightening( 15, 0, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), - Tightening( 16, 0, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), - - Tightening( 17, -15, Tightening::LB ), Tightening( 17, 12, Tightening::UB ), - Tightening( 18, 0, Tightening::LB ), Tightening( 18, 27, Tightening::UB ), - - Tightening( 19, 0, Tightening::LB ), Tightening( 19, 15, Tightening::UB ), - Tightening( 20, 0, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), - - Tightening( 21, -28, Tightening::LB ), Tightening( 21, 14, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( - { - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 9, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), + 
Tightening( 13, -5, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), Tightening( 14, 9, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), + Tightening( 16, 0, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), + + Tightening( 17, -15, Tightening::LB ), Tightening( 17, 12, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 27, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 15, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), + + Tightening( 21, -28, Tightening::LB ), Tightening( 21, 14, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - + + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -10707,57 +10705,56 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 15, Tightening::UB ), - - Tightening( 11, -13, Tightening::LB ), Tightening( 11, 18, Tightening::UB ), - Tightening( 12, -5, Tightening::LB ), Tightening( 12, 25, Tightening::UB ), - Tightening( 13, -7, Tightening::LB ), Tightening( 13, 15, Tightening::UB ), - - Tightening( 14, 0, Tightening::LB ), Tightening( 14, 18, Tightening::UB ), - Tightening( 15, 0, Tightening::LB ), Tightening( 15, 25, Tightening::UB ), - Tightening( 16, 0, Tightening::LB ), Tightening( 16, 15, Tightening::UB ), - - Tightening( 17, -33, Tightening::LB ), Tightening( 17, 25, Tightening::UB ), - Tightening( 18, 0, Tightening::LB ), Tightening( 18, 58, Tightening::UB ), - - Tightening( 19, 0, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), - Tightening( 20, 0, Tightening::LB ), Tightening( 20, 58, Tightening::UB ), - - Tightening( 21, -59, Tightening::LB ), Tightening( 21, 32, Tightening::UB ), - } ); - + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, 
Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 15, Tightening::UB ), + + Tightening( 11, -13, Tightening::LB ), Tightening( 11, 18, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 25, Tightening::UB ), + Tightening( 13, -7, Tightening::LB ), Tightening( 13, 15, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), Tightening( 14, 18, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), Tightening( 15, 25, Tightening::UB ), + Tightening( 16, 0, Tightening::LB ), Tightening( 16, 15, Tightening::UB ), + + Tightening( 17, -33, Tightening::LB ), Tightening( 17, 25, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 58, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 58, Tightening::UB ), + + Tightening( 21, -59, Tightening::LB ), Tightening( 21, 32, Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( - { - } ); - + + List expectedBounds4( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - + void test_backwards_round() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -10772,42 +10769,43 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( - { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), - Tightening( 7, -4, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -4, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 6.5, Tightening::UB 
), + } ); - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 6.5, Tightening::UB ), - } ); - List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( - { Tightening( 11, -4, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), - } ); - + + List expectedBounds2( { + Tightening( 11, -4, Tightening::LB ), + Tightening( 11, 6, Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - + + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -10840,48 +10838,51 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 6, -3, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), - Tightening( 7, -10.5, Tightening::LB ), Tightening( 7, 5.5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 8, -3, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, -10, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + Tightening( 6, -3, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, -10.5, Tightening::LB ), Tightening( 7, 5.5, Tightening::UB ), + + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -10, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + + Tightening( 10, -3, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + Tightening( 11, -15.5, Tightening::LB ), Tightening( 11, 11.5, Tightening::UB ), + } ); - Tightening( 10, -3, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - Tightening( 11, -15.5, Tightening::LB ), Tightening( 11, 11.5, Tightening::UB ), - } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( - { Tightening( 7, -10, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - - Tightening( 11, -14, Tightening::LB ), Tightening( 11, 11, Tightening::UB ), - } ); - + + List expectedBounds4( { + Tightening( 7, -10, Tightening::LB ), + Tightening( 7, 5, Tightening::UB ), + + Tightening( 11, -14, Tightening::LB ), + Tightening( 11, 11, Tightening::UB ), + } ); + 
TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - + void test_backwards_round2() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -10898,53 +10899,51 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( - { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -11, Tightening::LB ), Tightening( 11, 15, Tightening::UB ), - Tightening( 12, -10, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), - Tightening( 13, -7, Tightening::LB ), Tightening( 13, 3, Tightening::UB ), - - Tightening( 14, -11, Tightening::LB ), Tightening( 14, 15, Tightening::UB ), - Tightening( 15, -10, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), - Tightening( 16, -7, Tightening::LB ), Tightening( 16, 3, Tightening::UB ), - - Tightening( 17, -27.5, Tightening::LB ), Tightening( 17, 27.5, Tightening::UB ), - Tightening( 18, -16.5, Tightening::LB ), Tightening( 18, 16.5, Tightening::UB ), - - Tightening( 19, -28, Tightening::LB ), Tightening( 19, 28, Tightening::UB ), - Tightening( 20, -17, Tightening::LB ), Tightening( 20, 17, Tightening::UB ), - - Tightening( 21, -38, Tightening::LB ), Tightening( 21, 36, Tightening::UB ), - } ); - + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 15, Tightening::UB ), + Tightening( 12, -10, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -7, Tightening::LB ), Tightening( 13, 3, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 15, Tightening::UB ), + Tightening( 15, -10, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -7, Tightening::LB ), Tightening( 16, 3, Tightening::UB ), + + Tightening( 17, -27.5, Tightening::LB ), Tightening( 17, 27.5, Tightening::UB ), + Tightening( 18, -16.5, Tightening::LB ), Tightening( 18, 16.5, Tightening::UB ), + + Tightening( 19, -28, 
Tightening::LB ), Tightening( 19, 28, Tightening::UB ), + Tightening( 20, -17, Tightening::LB ), Tightening( 20, 17, Tightening::UB ), + + Tightening( 21, -38, Tightening::LB ), Tightening( 21, 36, Tightening::UB ), + } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( - { - } ); - + + List expectedBounds2( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - + + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -10997,57 +10996,56 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -13, Tightening::LB ), Tightening( 11, 30, Tightening::UB ), - Tightening( 12, -23, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), - Tightening( 13, -9, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), - - Tightening( 14, -13, Tightening::LB ), Tightening( 14, 30, Tightening::UB ), - Tightening( 15, -23, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), - Tightening( 16, -9, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), - - Tightening( 17, -57.5, Tightening::LB ), Tightening( 17, 32.5, Tightening::UB ), - Tightening( 18, -23.5, Tightening::LB ), Tightening( 18, 26.5, Tightening::UB ), - - Tightening( 19, -58, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), - Tightening( 20, -24, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), - - Tightening( 21, -74, Tightening::LB ), Tightening( 21, 44, Tightening::UB ), - } ); - + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -13, Tightening::LB ), Tightening( 11, 30, Tightening::UB ), + Tightening( 12, -23, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), + + Tightening( 14, -13, Tightening::LB ), Tightening( 14, 30, 
Tightening::UB ), + Tightening( 15, -23, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), + Tightening( 16, -9, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), + + Tightening( 17, -57.5, Tightening::LB ), Tightening( 17, 32.5, Tightening::UB ), + Tightening( 18, -23.5, Tightening::LB ), Tightening( 18, 26.5, Tightening::UB ), + + Tightening( 19, -58, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), + Tightening( 20, -24, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), + + Tightening( 21, -74, Tightening::LB ), Tightening( 21, 44, Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( - { - } ); - + + List expectedBounds4( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - + void test_backwards_sign() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -11062,42 +11060,40 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( - { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), - Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), - } ); - List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); 
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( - { - } ); - + + List expectedBounds2( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - + + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -11130,46 +11126,45 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), - } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( - { - } ); - + + List expectedBounds4( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - + void test_backwards_sign2() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -11186,53 +11181,51 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( - { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, 
Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), - Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), - Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), - - Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), - Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), - Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), - - Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), - Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), - - Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), - Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), - - Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), - } ); - + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( - { - } ); - + + List expectedBounds2( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -11285,57 +11278,56 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly 
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), - Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), - Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), - - Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), - Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), - Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), - - Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), - Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), - - Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), - Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), - - Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), - } ); - + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( - { - } ); - + + List expectedBounds4( {} ); + TS_ASSERT_THROWS_NOTHING( 
nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - + void test_backwards_leaky_relu() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -11350,45 +11342,45 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( - { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), - } ); - List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( - { Tightening( 4, -0.1, Tightening::LB ), - - Tightening( 8, -0.045, Tightening::LB ), - Tightening( 9, -0.3, Tightening::LB ), - } ); - + + List expectedBounds2( { + Tightening( 4, -0.1, Tightening::LB ), + + Tightening( 8, -0.045, Tightening::LB ), + Tightening( 9, -0.3, Tightening::LB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - + + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -11421,50 +11413,51 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 
-4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), - Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), - Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), + Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + + Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), + Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + + Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), + Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), + } ); - Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), - Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), - } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( - { Tightening( 4, -0.5, Tightening::LB ), - Tightening( 5, -0.4, Tightening::LB ), - Tightening( 8, -0.4571, Tightening::LB ), - Tightening( 9, -1.1057, Tightening::LB ), - } ); - + List expectedBounds4( { + Tightening( 4, -0.5, Tightening::LB ), + Tightening( 5, -0.4, Tightening::LB ), + + Tightening( 8, -0.4571, Tightening::LB ), + Tightening( 9, -1.1057, Tightening::LB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - + void test_backwards_leaky_relu2() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -11481,63 +11474,63 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( - { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - 
Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ), - Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ), - Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ), - - Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ), - Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ), - Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ), - - Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ), - Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ), - - Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ), - Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ), - - Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ), - } ); - + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ), + Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ), + Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ), + Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ), + Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ), + + Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ), + Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ), + + Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ), + Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ), + + Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ), + } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( - { Tightening( 7, -0.2, Tightening::LB ), - Tightening( 8, -0.3, Tightening::LB ), - Tightening( 9, -0.3, Tightening::LB ), - Tightening( 10, -0.6, Tightening::LB ), - - Tightening( 14, -0.9, Tightening::LB ), - Tightening( 15, -0.89, Tightening::LB ), - Tightening( 16, -0.77, Tightening::LB ), - - Tightening( 19, -2.3133, Tightening::LB ), - Tightening( 20, -1.2, Tightening::LB ), - } ); - + + List expectedBounds2( { + Tightening( 7, -0.2, Tightening::LB ), + Tightening( 8, -0.3, Tightening::LB ), + Tightening( 
9, -0.3, Tightening::LB ), + Tightening( 10, -0.6, Tightening::LB ), + + Tightening( 14, -0.9, Tightening::LB ), + Tightening( 15, -0.89, Tightening::LB ), + Tightening( 16, -0.77, Tightening::LB ), + + Tightening( 19, -2.3133, Tightening::LB ), + Tightening( 20, -1.2, Tightening::LB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - + + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -11590,67 +11583,68 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ), - Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ), - Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ), - - Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ), - Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ), - Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ), - - Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ), - Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ), - - Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ), - Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ), - - Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ), - } ); - + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ), + Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ), + Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ), + Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ), + Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ), + + Tightening( 17, -56.2592, Tightening::LB ), Tightening( 
17, 33.8084, Tightening::UB ), + Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ), + + Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ), + Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ), + + Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( - { Tightening( 7, -0.2, Tightening::LB ), - Tightening( 8, -0.5, Tightening::LB ), - Tightening( 9, -0.6, Tightening::LB ), - Tightening( 10, -1.5, Tightening::LB ), - - Tightening( 14, -1.1, Tightening::LB ), - Tightening( 15, -2.1771, Tightening::LB ), - Tightening( 16, -1.15, Tightening::LB ), - - Tightening( 19, -5.6259, Tightening::LB ), - Tightening( 20, -1.9, Tightening::LB ), - } ); - + + List expectedBounds4( { + Tightening( 7, -0.2, Tightening::LB ), + Tightening( 8, -0.5, Tightening::LB ), + Tightening( 9, -0.6, Tightening::LB ), + Tightening( 10, -1.5, Tightening::LB ), + + Tightening( 14, -1.1, Tightening::LB ), + Tightening( 15, -2.1771, Tightening::LB ), + Tightening( 16, -1.15, Tightening::LB ), + + Tightening( 19, -5.6259, Tightening::LB ), + Tightening( 20, -1.9, Tightening::LB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - + void test_backwards_softmax_and_max() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -11660,47 +11654,45 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 0, 1 ); tableau.setLowerBound( 1, 0 ); tableau.setUpperBound( 1, 1 ); - + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( - { Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 3, 0.2119, Tightening::LB ), Tightening( 3, 0.8756, Tightening::UB ), - Tightening( 5, 0.0049, Tightening::LB ), Tightening( 5, 0.6652, Tightening::UB ), - Tightening( 7, 0.0634, Tightening::LB ), Tightening( 7, 0.4955, Tightening::UB ), + List expectedBounds( { + Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 8, -0.1519, Tightening::LB ), Tightening( 8, 1.4775, Tightening::UB ), - Tightening( 9, 0.6614, Tightening::LB ), Tightening( 9, 2.3899, Tightening::UB ), + Tightening( 3, 0.2119, Tightening::LB ), Tightening( 3, 0.8756, Tightening::UB ), + Tightening( 5, 0.0049, Tightening::LB ), Tightening( 5, 0.6652, Tightening::UB ), + Tightening( 7, 0.0634, Tightening::LB ), Tightening( 7, 
0.4955, Tightening::UB ), + + Tightening( 8, -0.1519, Tightening::LB ), Tightening( 8, 1.4775, Tightening::UB ), + Tightening( 9, 0.6614, Tightening::LB ), Tightening( 9, 2.3899, Tightening::UB ), + + Tightening( 10, 0.6614, Tightening::LB ), Tightening( 10, 2.3899, Tightening::UB ), + + Tightening( 11, -2.3899, Tightening::LB ), Tightening( 11, -0.6614, Tightening::UB ), + } ); - Tightening( 10, 0.6614, Tightening::LB ), Tightening( 10, 2.3899, Tightening::UB ), - - Tightening( 11, -2.3899, Tightening::LB ), Tightening( 11, -0.6614, Tightening::UB ), - } ); - List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( - { - } ); - + + List expectedBounds2( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - + + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -11733,46 +11725,45 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), - - Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), - Tightening( 9, 0.0674, Tightening::LB ), Tightening( 9, 2.9934, Tightening::UB ), - - Tightening( 10, 0.0674, Tightening::LB ), Tightening( 10, 2.9934, Tightening::UB ), - - Tightening( 11, -2.9934, Tightening::LB ), Tightening( 11, -0.0674, Tightening::UB ), - } ); - + + List expectedBounds3( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + + Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 9, 0.0674, Tightening::LB ), Tightening( 9, 2.9934, Tightening::UB ), + + Tightening( 10, 0.0674, Tightening::LB ), Tightening( 10, 2.9934, Tightening::UB ), + + Tightening( 11, -2.9934, Tightening::LB ), Tightening( 11, -0.0674, Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( - { - } ); - + + List 
expectedBounds4( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - + void test_backwards_softmax_and_max2() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -11790,46 +11781,44 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - List expectedBounds( - { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0.0003, Tightening::LB ), Tightening( 7, 0.9864, Tightening::UB ), - Tightening( 8, 0.0001, Tightening::LB ), Tightening( 8, 0.9907, Tightening::UB ), - Tightening( 9, 0.0001, Tightening::LB ), Tightening( 9, 0.9907, Tightening::UB ), - Tightening( 10, 0.0001, Tightening::LB ), Tightening( 10, 0.9994, Tightening::UB ), - - Tightening( 11, 1.0013, Tightening::LB ), Tightening( 11, 4.9635, Tightening::UB ), - Tightening( 12, -0.9861, Tightening::LB ), Tightening( 12, 2.9806, Tightening::UB ), - Tightening( 13, -2.9902, Tightening::LB ), Tightening( 13, 0.9678, Tightening::UB ), - - Tightening( 14, 0.1086, Tightening::LB ), Tightening( 14, 0.9971, Tightening::UB ), - Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8766, Tightening::UB ), - Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4595, Tightening::UB ), - - Tightening( 17, 0.1086, Tightening::LB ), Tightening( 17, 0.9971, Tightening::UB ), - } ); - + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0.0003, Tightening::LB ), Tightening( 7, 0.9864, Tightening::UB ), + Tightening( 8, 0.0001, Tightening::LB ), Tightening( 8, 0.9907, Tightening::UB ), + Tightening( 9, 0.0001, Tightening::LB ), Tightening( 9, 0.9907, Tightening::UB ), + Tightening( 10, 0.0001, Tightening::LB ), Tightening( 10, 0.9994, Tightening::UB ), + + Tightening( 11, 1.0013, Tightening::LB ), Tightening( 11, 4.9635, Tightening::UB ), + Tightening( 12, -0.9861, Tightening::LB ), Tightening( 12, 2.9806, Tightening::UB ), + Tightening( 13, -2.9902, Tightening::LB ), Tightening( 13, 0.9678, Tightening::UB ), + + Tightening( 14, 0.1086, Tightening::LB ), Tightening( 14, 0.9971, Tightening::UB ), + Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8766, Tightening::UB ), + Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4595, Tightening::UB ), + + Tightening( 17, 0.1086, Tightening::LB ), Tightening( 17, 0.9971, Tightening::UB ), + } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); 
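        // (Note on the recurring pattern in these tests: updateTableau() feeds
        // the tightenings just collected back into the mock tableau,
        // obtainCurrentBounds() refreshes the reasoner's copy of those bounds,
        // and only then is the LP relaxation invoked, so it starts from the
        // already-tightened box.)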
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( - { - } ); - + + List expectedBounds2( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - + + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -11874,51 +11863,50 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, 0.0001, Tightening::LB ), Tightening( 7, 0.9999, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 0.9991, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 0.9990, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9999, Tightening::UB ), - - Tightening( 11, 1.0003, Tightening::LB ), Tightening( 11, 4.9989, Tightening::UB ), - Tightening( 12, -0.9999, Tightening::LB ), Tightening( 12, 2.9979, Tightening::UB ), - Tightening( 13, -2.9989, Tightening::LB ), Tightening( 13, 0.9980, Tightening::UB ), - - Tightening( 14, 0.1067, Tightening::LB ), Tightening( 14, 0.9972, Tightening::UB ), - Tightening( 15, 0.0024, Tightening::LB ), Tightening( 15, 0.8786, Tightening::UB ), - Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4677, Tightening::UB ), - - Tightening( 17, 0.1067, Tightening::LB ), Tightening( 17, 0.9972, Tightening::UB ), - } ); - + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0.0001, Tightening::LB ), Tightening( 7, 0.9999, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 0.9991, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 0.9990, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9999, Tightening::UB ), + + Tightening( 11, 1.0003, Tightening::LB ), Tightening( 11, 4.9989, Tightening::UB ), + Tightening( 12, -0.9999, Tightening::LB ), Tightening( 12, 2.9979, Tightening::UB ), + Tightening( 13, -2.9989, Tightening::LB ), Tightening( 13, 0.9980, Tightening::UB ), + + Tightening( 14, 0.1067, Tightening::LB ), Tightening( 14, 0.9972, Tightening::UB ), + Tightening( 15, 0.0024, Tightening::LB ), Tightening( 15, 0.8786, Tightening::UB ), + Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4677, Tightening::UB ), + + Tightening( 17, 0.1067, Tightening::LB ), Tightening( 17, 0.9972, Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( - { - } ); - + + 
List expectedBounds4( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - + void test_backwards_relu_and_bilinear() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -11928,46 +11916,44 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 0, 1 ); tableau.setLowerBound( 1, 0 ); tableau.setUpperBound( 1, 1 ); - + // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( - { Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + List expectedBounds( { + Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ), + + Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ), + + Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ), + } ); - Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ), - - Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ), - } ); - List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( - { - } ); - + + List expectedBounds2( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - + + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -12000,46 +11986,47 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), 
Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + List expectedBounds3( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), - Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), + Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), + + Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), + + Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), + } ); - Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), - - Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), - } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( - { Tightening( 7, 0, Tightening::LB ), - } ); - + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - + void test_backwards_relu_and_bilinear2() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -12051,51 +12038,51 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 1, 1 ); tableau.setLowerBound( 2, -1 ); tableau.setUpperBound( 2, 1 ); - + // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( - { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), - - Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ), - Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, 
Tightening::UB ), - - Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ), - } ); - + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ), + Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ), + + Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ), + } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( - { Tightening( 13, 0, Tightening::LB ), - Tightening( 14, 0, Tightening::LB ), - } ); - + + List expectedBounds2( { + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - + + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -12131,57 +12118,58 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 14, large ); tableau.setLowerBound( 15, -large ); tableau.setUpperBound( 15, large ); - + // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), - Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), - - Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ), - Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ), - - Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ), - } ); - + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB 
), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ), + Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ), + + Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( - { Tightening( 7, 0, Tightening::LB ), - Tightening( 13, 0, Tightening::LB ), - Tightening( 14, 0, Tightening::LB ), - } ); - + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - + void test_preimage_approximation_leaky_relu() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-preimage-approx" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -12196,44 +12184,59 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( - { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, 
Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), - } ); - List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - + + // Invoke PreimageApproximation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List polygonal_bounds; - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintPolygonalTightenings( polygonal_bounds ) ); - for ( auto &bound : polygonal_bounds ) + + /*List expectedBounds2( + { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2.1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.35, Tightening::UB ), + Tightening( 11, 1.35, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( auto &bound : bounds ) { bound.dump(); } - /*TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );*/ + + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -12266,64 +12269,66 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), - Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), - Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), + Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + + Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), + Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + + Tightening( 10, 
-6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), + Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), + } ); - Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), - Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), - } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( + + /*List expectedBounds4( { Tightening( 4, -0.5, Tightening::LB ), Tightening( 5, -0.4, Tightening::LB ), Tightening( 8, -0.4571, Tightening::LB ), Tightening( 9, -1.1057, Tightening::LB ), } ); - + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );*/ } - + bool boundsEqual( const List &bounds, const List &expectedBounds ) { if ( bounds.size() != expectedBounds.size() ) return false; - + bool allFound = true; for ( const auto &bound : bounds ) { bool currentFound = false; for ( const auto &expectedBound : expectedBounds ) { - currentFound |= ( bound._type == expectedBound._type && bound._variable == expectedBound._variable && - FloatUtils::areEqual( bound._value, expectedBound._value, 0.0001 ) ); + currentFound |= + ( bound._type == expectedBound._type && + bound._variable == expectedBound._variable && + FloatUtils::areEqual( bound._value, expectedBound._value, 0.0001 ) ); } allFound &= currentFound; } return allFound; } - + void updateTableau( MockTableau &tableau, List &tightenings ) { ASSERT( tableau ); From 625a5b85ba5d2740cae7af430679bb1acebbc404 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 12 Dec 2024 21:01:22 +0200 Subject: [PATCH 36/81] Add files via upload --- src/configuration/GlobalConfiguration.cpp | 3 ++- src/configuration/GlobalConfiguration.h | 10 +++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/src/configuration/GlobalConfiguration.cpp b/src/configuration/GlobalConfiguration.cpp index 1a7d133f40..3a08b92234 100644 --- a/src/configuration/GlobalConfiguration.cpp +++ b/src/configuration/GlobalConfiguration.cpp @@ -70,8 +70,9 @@ const unsigned GlobalConfiguration::SIMULATION_RANDOM_SEED = 1; const unsigned GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED = 1; const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED = 1; const unsigned GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS = 1000; -const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS = 1000; +const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS = 100; const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE = 0.1; +const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE = 0.05; const bool GlobalConfiguration::USE_HARRIS_RATIO_TEST = true; diff --git a/src/configuration/GlobalConfiguration.h b/src/configuration/GlobalConfiguration.h index a779b93937..78ab9f662e 100644 --- a/src/configuration/GlobalConfiguration.h +++ b/src/configuration/GlobalConfiguration.h @@ -150,19 +150,19 @@ class GlobalConfiguration // Random seed for generating simulation values. static const unsigned SIMULATION_RANDOM_SEED; - + // Random seed for EstimateVolume procedure (PreimageApproximation). 
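    // (Illustrative sketch only, not part of the patch: a fixed seed keeps the
    // Monte-Carlo volume estimate reproducible, along the lines of
    //     std::mt19937 rng( VOLUME_ESTIMATION_RANDOM_SEED );
    //     std::uniform_real_distribution<double> coord( lb, ub );
    // where lb and ub stand in for the bounds of the sampled input box.)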
static const unsigned VOLUME_ESTIMATION_RANDOM_SEED; - + // Number of iterations for EstimateVolume procedure (PreimageApproximation). static const unsigned VOLUME_ESTIMATION_ITERATIONS; - + // Random seed for PreimageApproximation optimization. static const unsigned PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED; - + // Maximum iterations for PreimageApproximation optimization. static const unsigned PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS; - + // Step size for PreimageApproximation optimization. static const double PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; From 745cc957d22df8b4ae35a5f75c1324aff6cd43cb Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 9 Jan 2025 20:51:10 +0200 Subject: [PATCH 37/81] Add files via upload --- src/nlr/GlobalConfiguration.cpp | 238 ++++++++++++++++++++++++ src/nlr/GlobalConfiguration.h | 318 ++++++++++++++++++++++++++++++++ 2 files changed, 556 insertions(+) create mode 100644 src/nlr/GlobalConfiguration.cpp create mode 100644 src/nlr/GlobalConfiguration.h diff --git a/src/nlr/GlobalConfiguration.cpp b/src/nlr/GlobalConfiguration.cpp new file mode 100644 index 0000000000..a5d5100011 --- /dev/null +++ b/src/nlr/GlobalConfiguration.cpp @@ -0,0 +1,238 @@ +/********************* */ +/*! \file GlobalConfiguration.cpp + ** \verbatim + ** Top contributors (to current version): + ** Guy Katz, Parth Shah, Derek Huang + ** This file is part of the Marabou project. + ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS + ** in the top-level source directory) and their institutional affiliations. + ** All rights reserved. See the file COPYING in the top-level source + ** directory for licensing information.\endverbatim + ** + ** [[ Add lengthier description here ]] + + **/ + +#include "GlobalConfiguration.h" + +#include "DivideStrategy.h" +#include "MString.h" + +#include + + +// The exponential moving average is calculated as +// ema = current * alpha + previous * (1 - alpha) +const double GlobalConfiguration::EXPONENTIAL_MOVING_AVERAGE_ALPHA = 0.5; + +// Whether to use SoI instead of Reluplex for local search for satisfying assignments +// to non-linear constraint. +bool GlobalConfiguration::USE_DEEPSOI_LOCAL_SEARCH = true; + +const double GlobalConfiguration::SCORE_BUMP_FOR_PL_CONSTRAINTS_NOT_IN_SOI = 5; + +// Use the polarity metrics to decide which branch to take first in a case split +// and how to repair a ReLU constraint. 
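+// (Explanatory note, not part of the patch: for a ReLU whose input lies in
+// [l, u] with l < 0 < u, polarity is typically computed along the lines of
+//     double polarity = ( l + u ) / ( u - l ); // in [-1, 1]
+// so values close to -1 or +1 indicate a phase that is nearly fixed.)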
+const bool GlobalConfiguration::USE_POLARITY_BASED_DIRECTION_HEURISTICS = true; + +const double GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS = 0.0000000001; +const unsigned GlobalConfiguration::DEFAULT_DOUBLE_TO_STRING_PRECISION = 10; +const unsigned GlobalConfiguration::STATISTICS_PRINTING_FREQUENCY = 10000; +const unsigned GlobalConfiguration::STATISTICS_PRINTING_FREQUENCY_GUROBI = 100; +const double GlobalConfiguration::BOUND_COMPARISON_ADDITIVE_TOLERANCE = 0.0000001; +const double GlobalConfiguration::BOUND_COMPARISON_MULTIPLICATIVE_TOLERANCE = 0.001 * 0.0000001; +const double GlobalConfiguration::PIVOT_CHANGE_COLUMN_TOLERANCE = 0.000000001; +const double GlobalConfiguration::PIVOT_ROW_AND_COLUMN_TOLERANCE = 0.01; +const double GlobalConfiguration::ENTRY_ELIGIBILITY_TOLERANCE = 0.00000001; +const double GlobalConfiguration::RATIO_CONSTRAINT_ADDITIVE_TOLERANCE = 0.0000001 * 0.3; +const double GlobalConfiguration::RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE = + 0.001 * 0.0000001 * 0.3; +const double GlobalConfiguration::HARRIS_RATIO_CONSTRAINT_ADDITIVE_TOLERANCE = 0.0000001 * 0.5; +const double GlobalConfiguration::HARRIS_RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE = + 0.001 * 0.0000001 * 0.5; +const double GlobalConfiguration::BASIC_COSTS_ADDITIVE_TOLERANCE = 0.0000001; +const double GlobalConfiguration::BASIC_COSTS_MULTIPLICATIVE_TOLERANCE = 0.001 * 0.0000001; +const double GlobalConfiguration::SPARSE_FORREST_TOMLIN_DIAGONAL_ELEMENT_TOLERANCE = 0.00001; +const unsigned GlobalConfiguration::DEGRADATION_CHECKING_FREQUENCY = 100; +const double GlobalConfiguration::DEGRADATION_THRESHOLD = 0.1; +const double GlobalConfiguration::ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD = 0.0001; +const bool GlobalConfiguration::USE_COLUMN_MERGING_EQUATIONS = false; +const double GlobalConfiguration::GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD = 0.1; +const unsigned GlobalConfiguration::MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS = 5; +const DivideStrategy GlobalConfiguration::SPLITTING_HEURISTICS = DivideStrategy::ReLUViolation; +const unsigned GlobalConfiguration::INTERVAL_SPLITTING_FREQUENCY = 10; +const unsigned GlobalConfiguration::INTERVAL_SPLITTING_THRESHOLD = 10; +const unsigned GlobalConfiguration::BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY = 100; +const unsigned GlobalConfiguration::ROW_BOUND_TIGHTENER_SATURATION_ITERATIONS = 20; +const double GlobalConfiguration::COST_FUNCTION_ERROR_THRESHOLD = 0.0000000001; + +const unsigned GlobalConfiguration::SIMULATION_RANDOM_SEED = 1; +const unsigned GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED = 1; +const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED = 1; +const unsigned GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS = 25000; +const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS = 25; +const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE = 0.025; +const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE = 0.25; + +const bool GlobalConfiguration::USE_HARRIS_RATIO_TEST = true; + +const double GlobalConfiguration::SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT = 0.00000000001; +const double GlobalConfiguration::LP_TIGHTENING_ROUNDING_CONSTANT = 0.00000001; + +const double GlobalConfiguration::SIGMOID_CUTOFF_CONSTANT = 20; + +const bool GlobalConfiguration::PREPROCESS_INPUT_QUERY = true; +const bool GlobalConfiguration::PREPROCESSOR_ELIMINATE_VARIABLES = true; +const bool GlobalConfiguration::PL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING = 
true;
+const bool GlobalConfiguration::NL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING = true;
+const double GlobalConfiguration::PREPROCESSOR_ALMOST_FIXED_THRESHOLD = 0.00001;
+
+const unsigned GlobalConfiguration::PREPROCESSSING_MAX_TIGHTEING_ROUND = 10;
+
+const bool GlobalConfiguration::WARM_START = false;
+
+const unsigned GlobalConfiguration::MAX_ITERATIONS_WITHOUT_PROGRESS = 10000;
+
+const unsigned GlobalConfiguration::PSE_ITERATIONS_BEFORE_RESET = 1000;
+const double GlobalConfiguration::PSE_GAMMA_ERROR_THRESHOLD = 0.001;
+const double GlobalConfiguration::PSE_GAMMA_UPDATE_TOLERANCE = 0.000000001;
+
+const double GlobalConfiguration::CONSTRAINT_COMPARISON_TOLERANCE = 0.00001;
+
+const double GlobalConfiguration::SOFTMAX_LSE2_THRESHOLD = 0.6;
+
+const bool GlobalConfiguration::ONLY_AUX_INITIAL_BASIS = false;
+
+const GlobalConfiguration::ExplicitBasisBoundTighteningType
+    GlobalConfiguration::EXPLICIT_BASIS_BOUND_TIGHTENING_TYPE =
+        GlobalConfiguration::COMPUTE_INVERTED_BASIS_MATRIX;
+const bool GlobalConfiguration::EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION = false;
+const double GlobalConfiguration::EXPLICIT_BASIS_BOUND_TIGHTENING_ROUNDING_CONSTANT = 1e-6;
+
+const unsigned GlobalConfiguration::REFACTORIZATION_THRESHOLD = 100;
+const GlobalConfiguration::BasisFactorizationType GlobalConfiguration::BASIS_FACTORIZATION_TYPE =
+    GlobalConfiguration::SPARSE_FORREST_TOMLIN_FACTORIZATION;
+
+const unsigned GlobalConfiguration::POLARITY_CANDIDATES_THRESHOLD = 5;
+
+const unsigned GlobalConfiguration::DNC_DEPTH_THRESHOLD = 5;
+
+const double GlobalConfiguration::MINIMAL_COEFFICIENT_FOR_TIGHTENING = 0.01;
+const double GlobalConfiguration::LEMMA_CERTIFICATION_TOLERANCE = 0.000001;
+const bool GlobalConfiguration::WRITE_JSON_PROOF = false;
+
+const unsigned GlobalConfiguration::BACKWARD_BOUND_PROPAGATION_DEPTH = 3;
+const unsigned GlobalConfiguration::MAX_ROUNDS_OF_BACKWARD_ANALYSIS = 10;
+
+#ifdef ENABLE_GUROBI
+const unsigned GlobalConfiguration::GUROBI_NUMBER_OF_THREADS = 1;
+const bool GlobalConfiguration::GUROBI_LOGGING = false;
+#endif // ENABLE_GUROBI
+
+// Logging - note that it is enabled only in Debug mode
+const bool GlobalConfiguration::DNC_MANAGER_LOGGING = false;
+const bool GlobalConfiguration::ENGINE_LOGGING = false;
+const bool GlobalConfiguration::TABLEAU_LOGGING = false;
+const bool GlobalConfiguration::SMT_CORE_LOGGING = false;
+const bool GlobalConfiguration::DANTZIGS_RULE_LOGGING = false;
+const bool GlobalConfiguration::BASIS_FACTORIZATION_LOGGING = false;
+const bool GlobalConfiguration::PREPROCESSOR_LOGGING = false;
+const bool GlobalConfiguration::INPUT_QUERY_LOGGING = false;
+const bool GlobalConfiguration::PROJECTED_STEEPEST_EDGE_LOGGING = false;
+const bool GlobalConfiguration::GAUSSIAN_ELIMINATION_LOGGING = false;
+const bool GlobalConfiguration::QUERY_LOADER_LOGGING = false;
+const bool GlobalConfiguration::SYMBOLIC_BOUND_TIGHTENER_LOGGING = false;
+const bool GlobalConfiguration::NETWORK_LEVEL_REASONER_LOGGING = false;
+const bool GlobalConfiguration::MPS_PARSER_LOGGING = false;
+const bool GlobalConfiguration::ONNX_PARSER_LOGGING = false;
+const bool GlobalConfiguration::SOI_LOGGING = false;
+const bool GlobalConfiguration::SCORE_TRACKER_LOGGING = false;
+const bool GlobalConfiguration::CEGAR_LOGGING = false;
+
+const bool GlobalConfiguration::USE_SMART_FIX = false;
+const bool GlobalConfiguration::USE_LEAST_FIX = false;
+
+void GlobalConfiguration::print()
+{
+    printf( "****************************\n" );
+    printf( "*** Global Configuration ***\n" );
+
printf( "****************************\n" ); + printf( " DEFAULT_EPSILON_FOR_COMPARISONS: %.15lf\n", DEFAULT_EPSILON_FOR_COMPARISONS ); + printf( " DEFAULT_DOUBLE_TO_STRING_PRECISION: %u\n", DEFAULT_DOUBLE_TO_STRING_PRECISION ); + printf( " STATISTICS_PRINTING_FREQUENCY: %u\n", STATISTICS_PRINTING_FREQUENCY ); + printf( " BOUND_COMPARISON_ADDITIVE_TOLERANCE: %.15lf\n", + BOUND_COMPARISON_ADDITIVE_TOLERANCE ); + printf( " BOUND_COMPARISON_MULTIPLICATIVE_TOLERANCE: %.15lf\n", + BOUND_COMPARISON_MULTIPLICATIVE_TOLERANCE ); + printf( " PIVOT_CHANGE_COLUMN_TOLERANCE: %.15lf\n", PIVOT_CHANGE_COLUMN_TOLERANCE ); + printf( " RATIO_CONSTRAINT_ADDITIVE_TOLERANCE: %.15lf\n", + RATIO_CONSTRAINT_ADDITIVE_TOLERANCE ); + printf( " RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE: %.15lf\n", + RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE ); + printf( " BASIC_COSTS_ADDITIVE_TOLERANCE: %.15lf\n", BASIC_COSTS_ADDITIVE_TOLERANCE ); + printf( " BASIC_COSTS_MULTIPLICATIVE_TOLERANCE: %.15lf\n", + BASIC_COSTS_MULTIPLICATIVE_TOLERANCE ); + printf( " DEGRADATION_CHECKING_FREQUENCY: %u\n", DEGRADATION_CHECKING_FREQUENCY ); + printf( " DEGRADATION_THRESHOLD: %.15lf\n", DEGRADATION_THRESHOLD ); + printf( " ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD: %.15lf\n", ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD ); + printf( " USE_COLUMN_MERGING_EQUATIONS: %s\n", USE_COLUMN_MERGING_EQUATIONS ? "Yes" : "No" ); + printf( " GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD: %.15lf\n", + GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD ); + printf( " MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS: %u\n", MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS ); + printf( " BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY: %u\n", + BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY ); + printf( " COST_FUNCTION_ERROR_THRESHOLD: %.15lf\n", COST_FUNCTION_ERROR_THRESHOLD ); + printf( " USE_HARRIS_RATIO_TEST: %s\n", USE_HARRIS_RATIO_TEST ? "Yes" : "No" ); + + printf( " PREPROCESS_INPUT_QUERY: %s\n", PREPROCESS_INPUT_QUERY ? "Yes" : "No" ); + printf( " PREPROCESSOR_ELIMINATE_VARIABLES: %s\n", + PREPROCESSOR_ELIMINATE_VARIABLES ? "Yes" : "No" ); + printf( " PSE_ITERATIONS_BEFORE_RESET: %u\n", PSE_ITERATIONS_BEFORE_RESET ); + printf( " PSE_GAMMA_ERROR_THRESHOLD: %.15lf\n", PSE_GAMMA_ERROR_THRESHOLD ); + printf( " CONSTRAINT_COMPARISON_TOLERANCE: %.15lf\n", CONSTRAINT_COMPARISON_TOLERANCE ); + + String basisBoundTighteningType; + switch ( EXPLICIT_BASIS_BOUND_TIGHTENING_TYPE ) + { + case COMPUTE_INVERTED_BASIS_MATRIX: + basisBoundTighteningType = "Compute inverted basis matrix"; + break; + + case USE_IMPLICIT_INVERTED_BASIS_MATRIX: + basisBoundTighteningType = "Use implicit inverted basis matrix"; + break; + + default: + basisBoundTighteningType = "Unknown"; + break; + } + + printf( " EXPLICIT_BASIS_BOUND_TIGHTENING_INVERT_BASIS: %s\n", + basisBoundTighteningType.ascii() ); + printf( " EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION: %s\n", + EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION ? 
"Yes" : "No" ); + printf( " REFACTORIZATION_THRESHOLD: %u\n", REFACTORIZATION_THRESHOLD ); + + String basisFactorizationType; + if ( GlobalConfiguration::BASIS_FACTORIZATION_TYPE == GlobalConfiguration::LU_FACTORIZATION ) + basisFactorizationType = "LU_FACTORIZATION"; + else if ( GlobalConfiguration::BASIS_FACTORIZATION_TYPE == + GlobalConfiguration::SPARSE_LU_FACTORIZATION ) + basisFactorizationType = "SPARSE_LU_FACTORIZATION"; + else if ( GlobalConfiguration::BASIS_FACTORIZATION_TYPE == + GlobalConfiguration::FORREST_TOMLIN_FACTORIZATION ) + basisFactorizationType = "FORREST_TOMLIN_FACTORIZATION"; + else + basisFactorizationType = "Unknown"; + + printf( " BASIS_FACTORIZATION_TYPE: %s\n", basisFactorizationType.ascii() ); + printf( "****************************\n" ); +} + +// +// Local Variables: +// compile-command: "make -C ../.. " +// tags-file-name: "../../TAGS" +// c-basic-offset: 4 +// End: +// diff --git a/src/nlr/GlobalConfiguration.h b/src/nlr/GlobalConfiguration.h new file mode 100644 index 0000000000..92dcfdd049 --- /dev/null +++ b/src/nlr/GlobalConfiguration.h @@ -0,0 +1,318 @@ +/********************* */ +/*! \file GlobalConfiguration.h + ** \verbatim + ** Top contributors (to current version): + ** Guy Katz, Parth Shah, Derek Huang + ** This file is part of the Marabou project. + ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS + ** in the top-level source directory) and their institutional affiliations. + ** All rights reserved. See the file COPYING in the top-level source + ** directory for licensing information.\endverbatim + ** + ** [[ Add lengthier description here ]] + + **/ + +#ifndef __GlobalConfiguration_h__ +#define __GlobalConfiguration_h__ + +#include "DivideStrategy.h" + +class GlobalConfiguration +{ +public: + static void print(); + + // The exponential moving average is calculated as + // ema = current * alpha + previous * (1 - alpha) + static const double EXPONENTIAL_MOVING_AVERAGE_ALPHA; + + // Whether to use SoI instead of Reluplex for local search for satisfying assignments + // to non-linear constraint. + static bool USE_DEEPSOI_LOCAL_SEARCH; + + // The quantity by which the score is bumped up for PLContraints not + // participating in the SoI. This promotes those constraints in the branching + // order. + static const double SCORE_BUMP_FOR_PL_CONSTRAINTS_NOT_IN_SOI; + + // Use the polarity metrics to decide which branch to take first in a case split + // and how to repair a ReLU constraint. + static const bool USE_POLARITY_BASED_DIRECTION_HEURISTICS; + + // The default epsilon used for comparing doubles + static const double DEFAULT_EPSILON_FOR_COMPARISONS; + + // The precision level when convering doubles to strings + static const unsigned DEFAULT_DOUBLE_TO_STRING_PRECISION; + + // How often should the main loop print statistics? + static const unsigned STATISTICS_PRINTING_FREQUENCY; + static const unsigned STATISTICS_PRINTING_FREQUENCY_GUROBI; + + // Tolerance when checking whether the value computed for a basic variable is out of bounds + static const double BOUND_COMPARISON_ADDITIVE_TOLERANCE; + static const double BOUND_COMPARISON_MULTIPLICATIVE_TOLERANCE; + + // Tolerance when checking whether a basic variable depends on a non-basic variable, by looking + // at the change column, as part of a pivot operation. 
+ static const double PIVOT_CHANGE_COLUMN_TOLERANCE; + + // Tolerance for the difference when computing the pivot entry by column and by row + static const double PIVOT_ROW_AND_COLUMN_TOLERANCE; + + // Tolerance when checking whether a non-basic variable is eligible for being selected as the + // entering variable, by its reduced cost + static const double ENTRY_ELIGIBILITY_TOLERANCE; + + // Ratio test tolerance constants + static const double RATIO_CONSTRAINT_ADDITIVE_TOLERANCE; + static const double RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE; + static const double HARRIS_RATIO_CONSTRAINT_ADDITIVE_TOLERANCE; + static const double HARRIS_RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE; + + // Cost function tolerance constants + static const double BASIC_COSTS_ADDITIVE_TOLERANCE; + static const double BASIC_COSTS_MULTIPLICATIVE_TOLERANCE; + + // Sparse ForrestTomlin diagonal element tolerance constant + static const double SPARSE_FORREST_TOMLIN_DIAGONAL_ELEMENT_TOLERANCE; + + // Toggle use of Harris' two-pass ratio test for selecting the leaving variable + static const bool USE_HARRIS_RATIO_TEST; + + // Toggle query-preprocessing on/off. + static const bool PREPROCESS_INPUT_QUERY; + + // Assuming the preprocessor is on, toggle whether or not it will attempt to perform variable + // elimination. + static const bool PREPROCESSOR_ELIMINATE_VARIABLES; + + // Toggle whether or not PL/NL constraints will be called upon + // to add auxiliary variables and equations after preprocessing. + static const bool PL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING; + static const bool NL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING; + + // If the difference between a variable's lower and upper bounds is smaller than this + // threshold, the preprocessor will treat it as fixed. + static const double PREPROCESSOR_ALMOST_FIXED_THRESHOLD; + + // Maximal rounds of tightening to perform in the preprocessor to avoid non-termination. + static const unsigned PREPROCESSSING_MAX_TIGHTEING_ROUND; + + // Try to set the initial tableau assignment to an assignment that is legal with + // respect to the input network. + static const bool WARM_START; + + // The maximal number of iterations without new tree states being visited, before + // the engine performs a precision restoration. + static const unsigned MAX_ITERATIONS_WITHOUT_PROGRESS; + + // How often should the main loop check the current degradation? + static const unsigned DEGRADATION_CHECKING_FREQUENCY; + + // The threshold of degradation above which restoration is required + static const double DEGRADATION_THRESHOLD; + + // If a pivot element in a simplex iteration is smaller than this threshold, the engine will + // attempt to pick another element. + static const double ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD; + + // If true, column-merging equations are given special treatment and cause columns in the + // tableau to be merged (instead of a new row added). + static const bool USE_COLUMN_MERGING_EQUATIONS; + + // If a pivot element in a Gaussian elimination iteration is smaller than this threshold times + // the largest element in the column, the elimination engine will attempt to pick another pivot. + static const double GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD; + + // How many potential pivots should the engine inspect (at most) in every simplex iteration? 
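+    // (Together with ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD, this caps the work
+    // spent hunting for a numerically acceptable pivot in one iteration.)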
+    static const unsigned MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS;
+
+    static const DivideStrategy SPLITTING_HEURISTICS;
+
+    // The frequency to use interval splitting when largest interval splitting strategy is in use.
+    static const unsigned INTERVAL_SPLITTING_FREQUENCY;
+
+    // When automatically deciding which splitting strategy to use, we use relu-splitting if
+    // the number of inputs is larger than this number.
+    static const unsigned INTERVAL_SPLITTING_THRESHOLD;
+
+    // How often should we perform full bound tightening, on the entire constraint matrix A.
+    static const unsigned BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY;
+
+    // When the row bound tightener is asked to run until saturation, it can enter an infinite loop
+    // due to tiny increments in bounds. This number limits the number of iterations it can perform.
+    static const unsigned ROW_BOUND_TIGHTENER_SATURATION_ITERATIONS;
+
+    // If the cost function error exceeds this threshold, it is recomputed
+    static const double COST_FUNCTION_ERROR_THRESHOLD;
+
+    // Random seed for generating simulation values.
+    static const unsigned SIMULATION_RANDOM_SEED;
+
+    // Random seed for EstimateVolume procedure (PreimageApproximation).
+    static const unsigned VOLUME_ESTIMATION_RANDOM_SEED;
+
+    // Number of iterations for EstimateVolume procedure (PreimageApproximation).
+    static const unsigned VOLUME_ESTIMATION_ITERATIONS;
+
+    // Random seed for PreimageApproximation optimization.
+    static const unsigned PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED;
+
+    // Maximum iterations for PreimageApproximation optimization.
+    static const unsigned PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS;
+
+    // Step size for PreimageApproximation optimization.
+    static const double PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE;
+
+    // Learning rate for PreimageApproximation optimization.
+    static const double PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE;
+
+    // How often should projected steepest edge reset the reference space?
+    static const unsigned PSE_ITERATIONS_BEFORE_RESET;
+
+    // An error threshold which, when crossed, causes projected steepest edge to reset the reference
+    // space
+    static const double PSE_GAMMA_ERROR_THRESHOLD;
+
+    // PSE's Gamma function's update tolerance
+    static const double PSE_GAMMA_UPDATE_TOLERANCE;
+
+    // The tolerance for checking whether f = Constraint( b ), Constraint \in { ReLU, ABS, Sign}
+    static const double CONSTRAINT_COMPARISON_TOLERANCE;
+
+    // Toggle between two types of LSE lower bound for softmax
+    static const double SOFTMAX_LSE2_THRESHOLD;
+
+    // Should the initial basis be comprised only of auxiliary (row) variables?
+    static const bool ONLY_AUX_INITIAL_BASIS;
+
+    /*
+      Explicit (Reluplex-style) bound tightening options
+    */
+
+    enum ExplicitBasisBoundTighteningType {
+        // Compute the inverse basis matrix and use it
+        COMPUTE_INVERTED_BASIS_MATRIX = 0,
+        // Use the inverted basis matrix without explicitly computing it, via transformations
+        USE_IMPLICIT_INVERTED_BASIS_MATRIX = 1,
+        // Disable explicit basis bound tightening
+        DISABLE_EXPLICIT_BASIS_TIGHTENING = 2,
+    };
+
+    // When doing bound tightening using the explicit basis matrix, should the basis matrix be
+    // inverted?
+    static const ExplicitBasisBoundTighteningType EXPLICIT_BASIS_BOUND_TIGHTENING_TYPE;
+    static const double EXPLICIT_BASIS_BOUND_TIGHTENING_ROUNDING_CONSTANT;
+
+    // When doing explicit bound tightening, should we repeat until saturation?
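+    // Editor's sketch of the intended semantics (assumed, not taken from engine code):
+    //   do { changed = tightenAllBoundsOnce(); }
+    //   while ( changed && EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION );
+    // tightenAllBoundsOnce is a hypothetical helper named only for illustration.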
+ static const bool EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION; + + /* + Symbolic bound tightening options + */ + + // Symbolic tightening, LP rounding constants + static const double SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT; + static const double LP_TIGHTENING_ROUNDING_CONSTANT; + + static const double SIGMOID_CUTOFF_CONSTANT; + + /* + Constraint fixing heuristics + */ + + // When a PL constraint proposes a fix that affects multiple variables, should it first query + // for any relevant linear connections between the variables? + static const bool USE_SMART_FIX; + + // A heuristic for selecting which of the broken PL constraints will be repaired next. In this + // case, the one that has been repaired the least number of times so far. + static const bool USE_LEAST_FIX; + + /* + Basis factorization options + */ + + // The number of accumualted eta matrices, after which the basis will be refactorized + static const unsigned REFACTORIZATION_THRESHOLD; + + // The kind of basis factorization algorithm in use + enum BasisFactorizationType { + LU_FACTORIZATION, + SPARSE_LU_FACTORIZATION, + FORREST_TOMLIN_FACTORIZATION, + SPARSE_FORREST_TOMLIN_FACTORIZATION, + }; + static const BasisFactorizationType BASIS_FACTORIZATION_TYPE; + + /* In the polarity-based branching heuristics, only this many earliest nodes + are considered to branch on. + */ + static const unsigned POLARITY_CANDIDATES_THRESHOLD; + + /* The max number of DnC splits + */ + static const unsigned DNC_DEPTH_THRESHOLD; + + /* Minimal coefficient of a variable in a Tableau row, that is used for bound tightening + */ + static const double MINIMAL_COEFFICIENT_FOR_TIGHTENING; + + /* The tolerance of errors when checking lemmas in the proof-checking process + */ + static const double LEMMA_CERTIFICATION_TOLERANCE; + + /* Denote whether proofs should be written as a JSON file + */ + static const bool WRITE_JSON_PROOF; + + /* How many layers after the current layer do we encode in backward analysis. + */ + static const unsigned BACKWARD_BOUND_PROPAGATION_DEPTH; + + /* How many rounds of backward analysis to perform? + */ + static const unsigned MAX_ROUNDS_OF_BACKWARD_ANALYSIS; + +#ifdef ENABLE_GUROBI + /* + The number of threads Gurobi spawns + */ + static const unsigned GUROBI_NUMBER_OF_THREADS; + static const bool GUROBI_LOGGING; +#endif // ENABLE_GUROBI + + /* + Logging options + */ + static const bool DNC_MANAGER_LOGGING; + static const bool ENGINE_LOGGING; + static const bool TABLEAU_LOGGING; + static const bool SMT_CORE_LOGGING; + static const bool DANTZIGS_RULE_LOGGING; + static const bool BASIS_FACTORIZATION_LOGGING; + static const bool PREPROCESSOR_LOGGING; + static const bool INPUT_QUERY_LOGGING; + static const bool PROJECTED_STEEPEST_EDGE_LOGGING; + static const bool GAUSSIAN_ELIMINATION_LOGGING; + static const bool QUERY_LOADER_LOGGING; + static const bool SYMBOLIC_BOUND_TIGHTENER_LOGGING; + static const bool NETWORK_LEVEL_REASONER_LOGGING; + static const bool MPS_PARSER_LOGGING; + static const bool ONNX_PARSER_LOGGING; + static const bool SOI_LOGGING; + static const bool SCORE_TRACKER_LOGGING; + static const bool CEGAR_LOGGING; +}; + +#endif // __GlobalConfiguration_h__ + +// +// Local Variables: +// compile-command: "make -C .. 
" +// tags-file-name: "../TAGS" +// c-basic-offset: 4 +// End: +// From 2d77fb97fe340a72f7376f9d922721e0b86919b9 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 9 Jan 2025 20:53:01 +0200 Subject: [PATCH 38/81] Delete src/nlr/GlobalConfiguration.cpp --- src/nlr/GlobalConfiguration.cpp | 238 -------------------------------- 1 file changed, 238 deletions(-) delete mode 100644 src/nlr/GlobalConfiguration.cpp diff --git a/src/nlr/GlobalConfiguration.cpp b/src/nlr/GlobalConfiguration.cpp deleted file mode 100644 index a5d5100011..0000000000 --- a/src/nlr/GlobalConfiguration.cpp +++ /dev/null @@ -1,238 +0,0 @@ -/********************* */ -/*! \file GlobalConfiguration.cpp - ** \verbatim - ** Top contributors (to current version): - ** Guy Katz, Parth Shah, Derek Huang - ** This file is part of the Marabou project. - ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS - ** in the top-level source directory) and their institutional affiliations. - ** All rights reserved. See the file COPYING in the top-level source - ** directory for licensing information.\endverbatim - ** - ** [[ Add lengthier description here ]] - - **/ - -#include "GlobalConfiguration.h" - -#include "DivideStrategy.h" -#include "MString.h" - -#include - - -// The exponential moving average is calculated as -// ema = current * alpha + previous * (1 - alpha) -const double GlobalConfiguration::EXPONENTIAL_MOVING_AVERAGE_ALPHA = 0.5; - -// Whether to use SoI instead of Reluplex for local search for satisfying assignments -// to non-linear constraint. -bool GlobalConfiguration::USE_DEEPSOI_LOCAL_SEARCH = true; - -const double GlobalConfiguration::SCORE_BUMP_FOR_PL_CONSTRAINTS_NOT_IN_SOI = 5; - -// Use the polarity metrics to decide which branch to take first in a case split -// and how to repair a ReLU constraint. 
-const bool GlobalConfiguration::USE_POLARITY_BASED_DIRECTION_HEURISTICS = true; - -const double GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS = 0.0000000001; -const unsigned GlobalConfiguration::DEFAULT_DOUBLE_TO_STRING_PRECISION = 10; -const unsigned GlobalConfiguration::STATISTICS_PRINTING_FREQUENCY = 10000; -const unsigned GlobalConfiguration::STATISTICS_PRINTING_FREQUENCY_GUROBI = 100; -const double GlobalConfiguration::BOUND_COMPARISON_ADDITIVE_TOLERANCE = 0.0000001; -const double GlobalConfiguration::BOUND_COMPARISON_MULTIPLICATIVE_TOLERANCE = 0.001 * 0.0000001; -const double GlobalConfiguration::PIVOT_CHANGE_COLUMN_TOLERANCE = 0.000000001; -const double GlobalConfiguration::PIVOT_ROW_AND_COLUMN_TOLERANCE = 0.01; -const double GlobalConfiguration::ENTRY_ELIGIBILITY_TOLERANCE = 0.00000001; -const double GlobalConfiguration::RATIO_CONSTRAINT_ADDITIVE_TOLERANCE = 0.0000001 * 0.3; -const double GlobalConfiguration::RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE = - 0.001 * 0.0000001 * 0.3; -const double GlobalConfiguration::HARRIS_RATIO_CONSTRAINT_ADDITIVE_TOLERANCE = 0.0000001 * 0.5; -const double GlobalConfiguration::HARRIS_RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE = - 0.001 * 0.0000001 * 0.5; -const double GlobalConfiguration::BASIC_COSTS_ADDITIVE_TOLERANCE = 0.0000001; -const double GlobalConfiguration::BASIC_COSTS_MULTIPLICATIVE_TOLERANCE = 0.001 * 0.0000001; -const double GlobalConfiguration::SPARSE_FORREST_TOMLIN_DIAGONAL_ELEMENT_TOLERANCE = 0.00001; -const unsigned GlobalConfiguration::DEGRADATION_CHECKING_FREQUENCY = 100; -const double GlobalConfiguration::DEGRADATION_THRESHOLD = 0.1; -const double GlobalConfiguration::ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD = 0.0001; -const bool GlobalConfiguration::USE_COLUMN_MERGING_EQUATIONS = false; -const double GlobalConfiguration::GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD = 0.1; -const unsigned GlobalConfiguration::MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS = 5; -const DivideStrategy GlobalConfiguration::SPLITTING_HEURISTICS = DivideStrategy::ReLUViolation; -const unsigned GlobalConfiguration::INTERVAL_SPLITTING_FREQUENCY = 10; -const unsigned GlobalConfiguration::INTERVAL_SPLITTING_THRESHOLD = 10; -const unsigned GlobalConfiguration::BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY = 100; -const unsigned GlobalConfiguration::ROW_BOUND_TIGHTENER_SATURATION_ITERATIONS = 20; -const double GlobalConfiguration::COST_FUNCTION_ERROR_THRESHOLD = 0.0000000001; - -const unsigned GlobalConfiguration::SIMULATION_RANDOM_SEED = 1; -const unsigned GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED = 1; -const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED = 1; -const unsigned GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS = 25000; -const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS = 25; -const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE = 0.025; -const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE = 0.25; - -const bool GlobalConfiguration::USE_HARRIS_RATIO_TEST = true; - -const double GlobalConfiguration::SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT = 0.00000000001; -const double GlobalConfiguration::LP_TIGHTENING_ROUNDING_CONSTANT = 0.00000001; - -const double GlobalConfiguration::SIGMOID_CUTOFF_CONSTANT = 20; - -const bool GlobalConfiguration::PREPROCESS_INPUT_QUERY = true; -const bool GlobalConfiguration::PREPROCESSOR_ELIMINATE_VARIABLES = true; -const bool GlobalConfiguration::PL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING = 
true; -const bool GlobalConfiguration::NL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING = true; -const double GlobalConfiguration::PREPROCESSOR_ALMOST_FIXED_THRESHOLD = 0.00001; - -const unsigned GlobalConfiguration::PREPROCESSSING_MAX_TIGHTEING_ROUND = 10; - -const bool GlobalConfiguration::WARM_START = false; - -const unsigned GlobalConfiguration::MAX_ITERATIONS_WITHOUT_PROGRESS = 10000; - -const unsigned GlobalConfiguration::PSE_ITERATIONS_BEFORE_RESET = 1000; -const double GlobalConfiguration::PSE_GAMMA_ERROR_THRESHOLD = 0.001; -const double GlobalConfiguration::PSE_GAMMA_UPDATE_TOLERANCE = 0.000000001; - -const double GlobalConfiguration::CONSTRAINT_COMPARISON_TOLERANCE = 0.00001; - -const double GlobalConfiguration::SOFTMAX_LSE2_THRESHOLD = 0.6; - -const bool GlobalConfiguration::ONLY_AUX_INITIAL_BASIS = false; - -const GlobalConfiguration::ExplicitBasisBoundTighteningType - GlobalConfiguration::EXPLICIT_BASIS_BOUND_TIGHTENING_TYPE = - GlobalConfiguration::COMPUTE_INVERTED_BASIS_MATRIX; -const bool GlobalConfiguration::EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION = false; -const double GlobalConfiguration::EXPLICIT_BASIS_BOUND_TIGHTENING_ROUNDING_CONSTANT = 1e-6; - -const unsigned GlobalConfiguration::REFACTORIZATION_THRESHOLD = 100; -const GlobalConfiguration::BasisFactorizationType GlobalConfiguration::BASIS_FACTORIZATION_TYPE = - GlobalConfiguration::SPARSE_FORREST_TOMLIN_FACTORIZATION; - -const unsigned GlobalConfiguration::POLARITY_CANDIDATES_THRESHOLD = 5; - -const unsigned GlobalConfiguration::DNC_DEPTH_THRESHOLD = 5; - -const double GlobalConfiguration::MINIMAL_COEFFICIENT_FOR_TIGHTENING = 0.01; -const double GlobalConfiguration::LEMMA_CERTIFICATION_TOLERANCE = 0.000001; -const bool GlobalConfiguration::WRITE_JSON_PROOF = false; - -const unsigned GlobalConfiguration::BACKWARD_BOUND_PROPAGATION_DEPTH = 3; -const unsigned GlobalConfiguration::MAX_ROUNDS_OF_BACKWARD_ANALYSIS = 10; - -#ifdef ENABLE_GUROBI -const unsigned GlobalConfiguration::GUROBI_NUMBER_OF_THREADS = 1; -const bool GlobalConfiguration::GUROBI_LOGGING = false; -#endif // ENABLE_GUROBI - -// Logging - note that it is enabled only in Debug mode -const bool GlobalConfiguration::DNC_MANAGER_LOGGING = false; -const bool GlobalConfiguration::ENGINE_LOGGING = false; -const bool GlobalConfiguration::TABLEAU_LOGGING = false; -const bool GlobalConfiguration::SMT_CORE_LOGGING = false; -const bool GlobalConfiguration::DANTZIGS_RULE_LOGGING = false; -const bool GlobalConfiguration::BASIS_FACTORIZATION_LOGGING = false; -const bool GlobalConfiguration::PREPROCESSOR_LOGGING = false; -const bool GlobalConfiguration::INPUT_QUERY_LOGGING = false; -const bool GlobalConfiguration::PROJECTED_STEEPEST_EDGE_LOGGING = false; -const bool GlobalConfiguration::GAUSSIAN_ELIMINATION_LOGGING = false; -const bool GlobalConfiguration::QUERY_LOADER_LOGGING = false; -const bool GlobalConfiguration::SYMBOLIC_BOUND_TIGHTENER_LOGGING = false; -const bool GlobalConfiguration::NETWORK_LEVEL_REASONER_LOGGING = false; -const bool GlobalConfiguration::MPS_PARSER_LOGGING = false; -const bool GlobalConfiguration::ONNX_PARSER_LOGGING = false; -const bool GlobalConfiguration::SOI_LOGGING = false; -const bool GlobalConfiguration::SCORE_TRACKER_LOGGING = false; -const bool GlobalConfiguration::CEGAR_LOGGING = false; - -const bool GlobalConfiguration::USE_SMART_FIX = false; -const bool GlobalConfiguration::USE_LEAST_FIX = false; - -void GlobalConfiguration::print() -{ - printf( "****************************\n" ); - printf( "*** Global Configuraiton ***\n" ); - 
printf( "****************************\n" ); - printf( " DEFAULT_EPSILON_FOR_COMPARISONS: %.15lf\n", DEFAULT_EPSILON_FOR_COMPARISONS ); - printf( " DEFAULT_DOUBLE_TO_STRING_PRECISION: %u\n", DEFAULT_DOUBLE_TO_STRING_PRECISION ); - printf( " STATISTICS_PRINTING_FREQUENCY: %u\n", STATISTICS_PRINTING_FREQUENCY ); - printf( " BOUND_COMPARISON_ADDITIVE_TOLERANCE: %.15lf\n", - BOUND_COMPARISON_ADDITIVE_TOLERANCE ); - printf( " BOUND_COMPARISON_MULTIPLICATIVE_TOLERANCE: %.15lf\n", - BOUND_COMPARISON_MULTIPLICATIVE_TOLERANCE ); - printf( " PIVOT_CHANGE_COLUMN_TOLERANCE: %.15lf\n", PIVOT_CHANGE_COLUMN_TOLERANCE ); - printf( " RATIO_CONSTRAINT_ADDITIVE_TOLERANCE: %.15lf\n", - RATIO_CONSTRAINT_ADDITIVE_TOLERANCE ); - printf( " RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE: %.15lf\n", - RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE ); - printf( " BASIC_COSTS_ADDITIVE_TOLERANCE: %.15lf\n", BASIC_COSTS_ADDITIVE_TOLERANCE ); - printf( " BASIC_COSTS_MULTIPLICATIVE_TOLERANCE: %.15lf\n", - BASIC_COSTS_MULTIPLICATIVE_TOLERANCE ); - printf( " DEGRADATION_CHECKING_FREQUENCY: %u\n", DEGRADATION_CHECKING_FREQUENCY ); - printf( " DEGRADATION_THRESHOLD: %.15lf\n", DEGRADATION_THRESHOLD ); - printf( " ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD: %.15lf\n", ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD ); - printf( " USE_COLUMN_MERGING_EQUATIONS: %s\n", USE_COLUMN_MERGING_EQUATIONS ? "Yes" : "No" ); - printf( " GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD: %.15lf\n", - GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD ); - printf( " MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS: %u\n", MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS ); - printf( " BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY: %u\n", - BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY ); - printf( " COST_FUNCTION_ERROR_THRESHOLD: %.15lf\n", COST_FUNCTION_ERROR_THRESHOLD ); - printf( " USE_HARRIS_RATIO_TEST: %s\n", USE_HARRIS_RATIO_TEST ? "Yes" : "No" ); - - printf( " PREPROCESS_INPUT_QUERY: %s\n", PREPROCESS_INPUT_QUERY ? "Yes" : "No" ); - printf( " PREPROCESSOR_ELIMINATE_VARIABLES: %s\n", - PREPROCESSOR_ELIMINATE_VARIABLES ? "Yes" : "No" ); - printf( " PSE_ITERATIONS_BEFORE_RESET: %u\n", PSE_ITERATIONS_BEFORE_RESET ); - printf( " PSE_GAMMA_ERROR_THRESHOLD: %.15lf\n", PSE_GAMMA_ERROR_THRESHOLD ); - printf( " CONSTRAINT_COMPARISON_TOLERANCE: %.15lf\n", CONSTRAINT_COMPARISON_TOLERANCE ); - - String basisBoundTighteningType; - switch ( EXPLICIT_BASIS_BOUND_TIGHTENING_TYPE ) - { - case COMPUTE_INVERTED_BASIS_MATRIX: - basisBoundTighteningType = "Compute inverted basis matrix"; - break; - - case USE_IMPLICIT_INVERTED_BASIS_MATRIX: - basisBoundTighteningType = "Use implicit inverted basis matrix"; - break; - - default: - basisBoundTighteningType = "Unknown"; - break; - } - - printf( " EXPLICIT_BASIS_BOUND_TIGHTENING_INVERT_BASIS: %s\n", - basisBoundTighteningType.ascii() ); - printf( " EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION: %s\n", - EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION ? 
"Yes" : "No" ); - printf( " REFACTORIZATION_THRESHOLD: %u\n", REFACTORIZATION_THRESHOLD ); - - String basisFactorizationType; - if ( GlobalConfiguration::BASIS_FACTORIZATION_TYPE == GlobalConfiguration::LU_FACTORIZATION ) - basisFactorizationType = "LU_FACTORIZATION"; - else if ( GlobalConfiguration::BASIS_FACTORIZATION_TYPE == - GlobalConfiguration::SPARSE_LU_FACTORIZATION ) - basisFactorizationType = "SPARSE_LU_FACTORIZATION"; - else if ( GlobalConfiguration::BASIS_FACTORIZATION_TYPE == - GlobalConfiguration::FORREST_TOMLIN_FACTORIZATION ) - basisFactorizationType = "FORREST_TOMLIN_FACTORIZATION"; - else - basisFactorizationType = "Unknown"; - - printf( " BASIS_FACTORIZATION_TYPE: %s\n", basisFactorizationType.ascii() ); - printf( "****************************\n" ); -} - -// -// Local Variables: -// compile-command: "make -C ../.. " -// tags-file-name: "../../TAGS" -// c-basic-offset: 4 -// End: -// From 304efac01a1c8676dc79a2faad30b00ce9d9e4b7 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 9 Jan 2025 20:53:22 +0200 Subject: [PATCH 39/81] Delete src/nlr/GlobalConfiguration.h --- src/nlr/GlobalConfiguration.h | 318 ---------------------------------- 1 file changed, 318 deletions(-) delete mode 100644 src/nlr/GlobalConfiguration.h diff --git a/src/nlr/GlobalConfiguration.h b/src/nlr/GlobalConfiguration.h deleted file mode 100644 index 92dcfdd049..0000000000 --- a/src/nlr/GlobalConfiguration.h +++ /dev/null @@ -1,318 +0,0 @@ -/********************* */ -/*! \file GlobalConfiguration.h - ** \verbatim - ** Top contributors (to current version): - ** Guy Katz, Parth Shah, Derek Huang - ** This file is part of the Marabou project. - ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS - ** in the top-level source directory) and their institutional affiliations. - ** All rights reserved. See the file COPYING in the top-level source - ** directory for licensing information.\endverbatim - ** - ** [[ Add lengthier description here ]] - - **/ - -#ifndef __GlobalConfiguration_h__ -#define __GlobalConfiguration_h__ - -#include "DivideStrategy.h" - -class GlobalConfiguration -{ -public: - static void print(); - - // The exponential moving average is calculated as - // ema = current * alpha + previous * (1 - alpha) - static const double EXPONENTIAL_MOVING_AVERAGE_ALPHA; - - // Whether to use SoI instead of Reluplex for local search for satisfying assignments - // to non-linear constraint. - static bool USE_DEEPSOI_LOCAL_SEARCH; - - // The quantity by which the score is bumped up for PLContraints not - // participating in the SoI. This promotes those constraints in the branching - // order. - static const double SCORE_BUMP_FOR_PL_CONSTRAINTS_NOT_IN_SOI; - - // Use the polarity metrics to decide which branch to take first in a case split - // and how to repair a ReLU constraint. - static const bool USE_POLARITY_BASED_DIRECTION_HEURISTICS; - - // The default epsilon used for comparing doubles - static const double DEFAULT_EPSILON_FOR_COMPARISONS; - - // The precision level when convering doubles to strings - static const unsigned DEFAULT_DOUBLE_TO_STRING_PRECISION; - - // How often should the main loop print statistics? 
- static const unsigned STATISTICS_PRINTING_FREQUENCY; - static const unsigned STATISTICS_PRINTING_FREQUENCY_GUROBI; - - // Tolerance when checking whether the value computed for a basic variable is out of bounds - static const double BOUND_COMPARISON_ADDITIVE_TOLERANCE; - static const double BOUND_COMPARISON_MULTIPLICATIVE_TOLERANCE; - - // Tolerance when checking whether a basic variable depends on a non-basic variable, by looking - // at the change column, as part of a pivot operation. - static const double PIVOT_CHANGE_COLUMN_TOLERANCE; - - // Tolerance for the difference when computing the pivot entry by column and by row - static const double PIVOT_ROW_AND_COLUMN_TOLERANCE; - - // Tolerance when checking whether a non-basic variable is eligible for being selected as the - // entering variable, by its reduced cost - static const double ENTRY_ELIGIBILITY_TOLERANCE; - - // Ratio test tolerance constants - static const double RATIO_CONSTRAINT_ADDITIVE_TOLERANCE; - static const double RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE; - static const double HARRIS_RATIO_CONSTRAINT_ADDITIVE_TOLERANCE; - static const double HARRIS_RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE; - - // Cost function tolerance constants - static const double BASIC_COSTS_ADDITIVE_TOLERANCE; - static const double BASIC_COSTS_MULTIPLICATIVE_TOLERANCE; - - // Sparse ForrestTomlin diagonal element tolerance constant - static const double SPARSE_FORREST_TOMLIN_DIAGONAL_ELEMENT_TOLERANCE; - - // Toggle use of Harris' two-pass ratio test for selecting the leaving variable - static const bool USE_HARRIS_RATIO_TEST; - - // Toggle query-preprocessing on/off. - static const bool PREPROCESS_INPUT_QUERY; - - // Assuming the preprocessor is on, toggle whether or not it will attempt to perform variable - // elimination. - static const bool PREPROCESSOR_ELIMINATE_VARIABLES; - - // Toggle whether or not PL/NL constraints will be called upon - // to add auxiliary variables and equations after preprocessing. - static const bool PL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING; - static const bool NL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING; - - // If the difference between a variable's lower and upper bounds is smaller than this - // threshold, the preprocessor will treat it as fixed. - static const double PREPROCESSOR_ALMOST_FIXED_THRESHOLD; - - // Maximal rounds of tightening to perform in the preprocessor to avoid non-termination. - static const unsigned PREPROCESSSING_MAX_TIGHTEING_ROUND; - - // Try to set the initial tableau assignment to an assignment that is legal with - // respect to the input network. - static const bool WARM_START; - - // The maximal number of iterations without new tree states being visited, before - // the engine performs a precision restoration. - static const unsigned MAX_ITERATIONS_WITHOUT_PROGRESS; - - // How often should the main loop check the current degradation? - static const unsigned DEGRADATION_CHECKING_FREQUENCY; - - // The threshold of degradation above which restoration is required - static const double DEGRADATION_THRESHOLD; - - // If a pivot element in a simplex iteration is smaller than this threshold, the engine will - // attempt to pick another element. - static const double ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD; - - // If true, column-merging equations are given special treatment and cause columns in the - // tableau to be merged (instead of a new row added). 
- static const bool USE_COLUMN_MERGING_EQUATIONS; - - // If a pivot element in a Gaussian elimination iteration is smaller than this threshold times - // the largest element in the column, the elimination engine will attempt to pick another pivot. - static const double GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD; - - // How many potential pivots should the engine inspect (at most) in every simplex iteration? - static const unsigned MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS; - - static const DivideStrategy SPLITTING_HEURISTICS; - - // The frequency to use interval splitting when largest interval splitting strategy is in use. - static const unsigned INTERVAL_SPLITTING_FREQUENCY; - - // When automatically deciding which splitting strategy to use, we use relu-splitting if - // the number of inputs is larger than this number. - static const unsigned INTERVAL_SPLITTING_THRESHOLD; - - // How often should we perform full bound tightening, on the entire contraints matrix A. - static const unsigned BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY; - - // When the row bound tightener is asked to run until saturation, it can enter an infinite loop - // due to tiny increments in bounds. This number limits the number of iterations it can perform. - static const unsigned ROW_BOUND_TIGHTENER_SATURATION_ITERATIONS; - - // If the cost function error exceeds this threshold, it is recomputed - static const double COST_FUNCTION_ERROR_THRESHOLD; - - // Random seed for generating simulation values. - static const unsigned SIMULATION_RANDOM_SEED; - - // Random seed for EstimateVolume procedure (PreimageApproximation). - static const unsigned VOLUME_ESTIMATION_RANDOM_SEED; - - // Number of iterations for EstimateVolume procedure (PreimageApproximation). - static const unsigned VOLUME_ESTIMATION_ITERATIONS; - - // Random seed for PreimageApproximation optimization. - static const unsigned PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED; - - // Maximum iterations for PreimageApproximation optimization. - static const unsigned PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS; - - // Step size for PreimageApproximation optimization. - static const double PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; - - // Learning rate for PreimageApproximation optimization. - static const double PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE; - - // How often should projected steepest edge reset the reference space? - static const unsigned PSE_ITERATIONS_BEFORE_RESET; - - // An error threshold which, when crossed, causes projected steepest edge to reset the reference - // space - static const double PSE_GAMMA_ERROR_THRESHOLD; - - // PSE's Gamma function's update tolerance - static const double PSE_GAMMA_UPDATE_TOLERANCE; - - // The tolerance for checking whether f = Constraint( b ), Constraint \in { ReLU, ABS, Sign} - static const double CONSTRAINT_COMPARISON_TOLERANCE; - - // Toggle between two types of LSE lower bound for softmax - static const double SOFTMAX_LSE2_THRESHOLD; - - // Should the initial basis be comprised only of auxiliary (row) variables? 
- static const bool ONLY_AUX_INITIAL_BASIS; - - /* - Explicit (Reluplex-style) bound tightening options - */ - - enum ExplicitBasisBoundTighteningType { - // Compute the inverse basis matrix and use it - COMPUTE_INVERTED_BASIS_MATRIX = 0, - // Use the inverted basis matrix without using it, via transformations - USE_IMPLICIT_INVERTED_BASIS_MATRIX = 1, - // Disable explicit basis bound tightening - DISABLE_EXPLICIT_BASIS_TIGHTENING = 2, - }; - - // When doing bound tightening using the explicit basis matrix, should the basis matrix be - // inverted? - static const ExplicitBasisBoundTighteningType EXPLICIT_BASIS_BOUND_TIGHTENING_TYPE; - static const double EXPLICIT_BASIS_BOUND_TIGHTENING_ROUNDING_CONSTANT; - - // When doing explicit bound tightening, should we repeat until saturation? - static const bool EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION; - - /* - Symbolic bound tightening options - */ - - // Symbolic tightening, LP rounding constants - static const double SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT; - static const double LP_TIGHTENING_ROUNDING_CONSTANT; - - static const double SIGMOID_CUTOFF_CONSTANT; - - /* - Constraint fixing heuristics - */ - - // When a PL constraint proposes a fix that affects multiple variables, should it first query - // for any relevant linear connections between the variables? - static const bool USE_SMART_FIX; - - // A heuristic for selecting which of the broken PL constraints will be repaired next. In this - // case, the one that has been repaired the least number of times so far. - static const bool USE_LEAST_FIX; - - /* - Basis factorization options - */ - - // The number of accumualted eta matrices, after which the basis will be refactorized - static const unsigned REFACTORIZATION_THRESHOLD; - - // The kind of basis factorization algorithm in use - enum BasisFactorizationType { - LU_FACTORIZATION, - SPARSE_LU_FACTORIZATION, - FORREST_TOMLIN_FACTORIZATION, - SPARSE_FORREST_TOMLIN_FACTORIZATION, - }; - static const BasisFactorizationType BASIS_FACTORIZATION_TYPE; - - /* In the polarity-based branching heuristics, only this many earliest nodes - are considered to branch on. - */ - static const unsigned POLARITY_CANDIDATES_THRESHOLD; - - /* The max number of DnC splits - */ - static const unsigned DNC_DEPTH_THRESHOLD; - - /* Minimal coefficient of a variable in a Tableau row, that is used for bound tightening - */ - static const double MINIMAL_COEFFICIENT_FOR_TIGHTENING; - - /* The tolerance of errors when checking lemmas in the proof-checking process - */ - static const double LEMMA_CERTIFICATION_TOLERANCE; - - /* Denote whether proofs should be written as a JSON file - */ - static const bool WRITE_JSON_PROOF; - - /* How many layers after the current layer do we encode in backward analysis. - */ - static const unsigned BACKWARD_BOUND_PROPAGATION_DEPTH; - - /* How many rounds of backward analysis to perform? 
- */ - static const unsigned MAX_ROUNDS_OF_BACKWARD_ANALYSIS; - -#ifdef ENABLE_GUROBI - /* - The number of threads Gurobi spawns - */ - static const unsigned GUROBI_NUMBER_OF_THREADS; - static const bool GUROBI_LOGGING; -#endif // ENABLE_GUROBI - - /* - Logging options - */ - static const bool DNC_MANAGER_LOGGING; - static const bool ENGINE_LOGGING; - static const bool TABLEAU_LOGGING; - static const bool SMT_CORE_LOGGING; - static const bool DANTZIGS_RULE_LOGGING; - static const bool BASIS_FACTORIZATION_LOGGING; - static const bool PREPROCESSOR_LOGGING; - static const bool INPUT_QUERY_LOGGING; - static const bool PROJECTED_STEEPEST_EDGE_LOGGING; - static const bool GAUSSIAN_ELIMINATION_LOGGING; - static const bool QUERY_LOADER_LOGGING; - static const bool SYMBOLIC_BOUND_TIGHTENER_LOGGING; - static const bool NETWORK_LEVEL_REASONER_LOGGING; - static const bool MPS_PARSER_LOGGING; - static const bool ONNX_PARSER_LOGGING; - static const bool SOI_LOGGING; - static const bool SCORE_TRACKER_LOGGING; - static const bool CEGAR_LOGGING; -}; - -#endif // __GlobalConfiguration_h__ - -// -// Local Variables: -// compile-command: "make -C .. " -// tags-file-name: "../TAGS" -// c-basic-offset: 4 -// End: -// From 35cbdf5490c2d2d1b55ffee522307f7dd76b4b1e Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 9 Jan 2025 20:53:53 +0200 Subject: [PATCH 40/81] Add files via upload --- src/nlr/AdamOptimizer.h | 51 ++++++- src/nlr/CoordinateDescent.h | 12 ++ src/nlr/GradientDescent.h | 52 ++++++- src/nlr/LPFormulator.cpp | 233 +++++++++++++++++++++++------ src/nlr/LPFormulator.h | 12 +- src/nlr/Layer.cpp | 289 +++++++++++++++++++++++++++++++----- src/nlr/Layer.h | 2 + 7 files changed, 561 insertions(+), 90 deletions(-) diff --git a/src/nlr/AdamOptimizer.h b/src/nlr/AdamOptimizer.h index 7d97046329..018cd34325 100644 --- a/src/nlr/AdamOptimizer.h +++ b/src/nlr/AdamOptimizer.h @@ -12,6 +12,7 @@ #include // for std::sort #include +#include #include #include #include @@ -200,11 +201,10 @@ template struct adam_optimizer_parameters Real step_size = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; Real lr = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE; - Real lr = 0.05; Real epsilon = GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS; Real beta1 = 0.9; Real beta2 = 0.999; - Real weight_decay = 0.2; + Real weight_decay = 0; bool maximize = false; size_t max_iterations = 100; ArgumentContainer const *initial_guess = nullptr; @@ -295,6 +295,8 @@ ArgumentContainer adam_optimizer( auto epsilon = cd_params.epsilon; auto beta1 = cd_params.beta1; auto beta2 = cd_params.beta2; + auto lower_bounds = cd_params.lower_bounds; + auto upper_bounds = cd_params.upper_bounds; auto lr = cd_params.lr; auto weight_decay = cd_params.weight_decay; auto guess = @@ -319,14 +321,31 @@ ArgumentContainer adam_optimizer( for ( size_t i = 0; i < max_iterations; ++i ) { + double current_cost = cost_function( guess[0] ); + std::cout << " current_cost = " << current_cost << std::endl; for ( size_t j = 0; j < dimension; ++j ) { candidates[j] = guess[0]; candidates[j][j] += step_size; + if ( candidates[j][j] > upper_bounds[j] || candidates[j][j] < lower_bounds[j] ) + { + gradient[j] = 0; + continue; + } + size_t sign = ( maximize == false ? 
1 : -1 ); double cost = cost_function( candidates[j] ); - gradient[j] = sign * cost / step_size + weight_decay * guess[0][j]; + + for ( size_t k = 0; k < dimension; ++k ) + { + std::cout << "candidates[" << j << "][" << k << "] = " << candidates[j][k] + << std::endl; + std::cout << "guess[0][" << k << "] = " << guess[0][k] << std::endl; + } + + std::cout << " cost = " << cost << std::endl; + gradient[j] = sign * ( cost - current_cost ) / step_size + weight_decay * guess[0][j]; if ( !FloatUtils::isNan( target_value ) && cost <= target_value ) { @@ -335,6 +354,21 @@ ArgumentContainer adam_optimizer( } } + bool gradient_is_zero = true; + for ( size_t j = 0; j < dimension; ++j ) + { + std::cout << "gradient[" << j << "] = " << gradient[j] << std::endl; + if ( FloatUtils::abs( gradient[j] ) > epsilon ) + { + gradient_is_zero = false; + } + } + + if ( gradient_is_zero ) + { + break; + } + for ( size_t j = 0; j < dimension; ++j ) { first_moment[j] = beta1 * first_moment[j] + ( 1 - beta1 ) * gradient[j]; @@ -344,6 +378,17 @@ ArgumentContainer adam_optimizer( second_moment[j] /= ( 1 - std::pow( beta2, step_size ) ); guess[0][j] -= lr * first_moment[j] / ( std::sqrt( second_moment[j] ) + epsilon ); + + + if ( guess[0][j] > upper_bounds[j] ) + { + guess[0][j] = upper_bounds[j]; + } + + if ( guess[0][j] < lower_bounds[j] ) + { + guess[0][j] = lower_bounds[j]; + } } } diff --git a/src/nlr/CoordinateDescent.h b/src/nlr/CoordinateDescent.h index 83f01ed197..f73d909c7f 100644 --- a/src/nlr/CoordinateDescent.h +++ b/src/nlr/CoordinateDescent.h @@ -251,6 +251,8 @@ ArgumentContainer coordinate_descent( const size_t candidates_number = 2 * dimension; auto step_size = cd_params.step_size; auto max_iterations = cd_params.max_iterations; + auto lower_bounds = cd_params.lower_bounds; + auto upper_bounds = cd_params.upper_bounds; auto guess = random_initial_population( cd_params.lower_bounds, cd_params.upper_bounds, 1, gen ); @@ -272,6 +274,16 @@ ArgumentContainer coordinate_descent( size_t sign = ( j % 2 == 0 ? 
1 : -1 ); candidates[j][index] += sign * step_size; + if ( candidates[j][index] > upper_bounds[j] ) + { + candidates[j][index] = upper_bounds[j]; + } + + if ( candidates[j][index] < lower_bounds[j] ) + { + candidates[j][index] = lower_bounds[j]; + } + costs[j] = cost_function( candidates[j] ); if ( current_minimum_cost && costs[j] < *current_minimum_cost ) diff --git a/src/nlr/GradientDescent.h b/src/nlr/GradientDescent.h index e86aca17d9..f582711397 100644 --- a/src/nlr/GradientDescent.h +++ b/src/nlr/GradientDescent.h @@ -12,6 +12,7 @@ #include // for std::sort #include +#include #include #include #include @@ -199,13 +200,11 @@ template struct gradient_descent_parameters ArgumentContainer upper_bounds; Real step_size = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; - // size_t max_iterations = - // GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS; - size_t max_iterations = 2; + size_t max_iterations = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS; + Real epsilon = GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS; bool maximize = false; Real weight_decay = 0; - // Real lr = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE; - Real lr = 0.001; + Real lr = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE; ArgumentContainer const *initial_guess = nullptr; }; @@ -223,6 +222,13 @@ void validate_gradient_descent_parameters( throw std::domain_error( oss.str() ); } + if ( FloatUtils::isNan( cd_params.epsilon ) || cd_params.epsilon <= 0 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": epsilon > 0 is required, but got epsilon=" << cd_params.epsilon << "."; + throw std::domain_error( oss.str() ); + } + if ( cd_params.max_iterations < 1 ) { oss << __FILE__ << ":" << __LINE__ << ":" << __func__; @@ -270,8 +276,11 @@ ArgumentContainer gradient_descent( auto step_size = cd_params.step_size; auto max_iterations = cd_params.max_iterations; auto maximize = cd_params.maximize; + auto epsilon = cd_params.epsilon; auto lr = cd_params.lr; auto weight_decay = cd_params.weight_decay; + auto lower_bounds = cd_params.lower_bounds; + auto upper_bounds = cd_params.upper_bounds; auto guess = random_initial_population( cd_params.lower_bounds, cd_params.upper_bounds, 1, gen ); @@ -286,15 +295,21 @@ ArgumentContainer gradient_descent( for ( size_t i = 0; i < max_iterations; ++i ) { + double current_cost = cost_function( guess[0] ); for ( size_t j = 0; j < dimension; ++j ) { candidates[j] = guess[0]; candidates[j][j] += step_size; + if ( candidates[j][j] > upper_bounds[j] || candidates[j][j] < lower_bounds[j] ) + { + gradient[j] = 0; + continue; + } + size_t sign = ( maximize == false ? 
1 : -1 ); double cost = cost_function( candidates[j] ); - gradient[j] = sign * cost / step_size + weight_decay * guess[0][j]; - + gradient[j] = sign * ( cost - current_cost ) / step_size + weight_decay * guess[0][j]; if ( !FloatUtils::isNan( target_value ) && cost <= target_value ) { guess[0] = candidates[j]; @@ -302,9 +317,32 @@ ArgumentContainer gradient_descent( } } + bool gradient_is_zero = true; + for ( size_t j = 0; j < dimension; ++j ) + { + if ( FloatUtils::abs( gradient[j] ) > epsilon ) + { + gradient_is_zero = false; + } + } + if ( gradient_is_zero ) + { + break; + } + for ( size_t j = 0; j < dimension; ++j ) { guess[0][j] -= lr * gradient[j]; + + if ( guess[0][j] > upper_bounds[j] ) + { + guess[0][j] = upper_bounds[j]; + } + + if ( guess[0][j] < lower_bounds[j] ) + { + guess[0][j] = lower_bounds[j]; + } } } diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp index c7f3691157..31e5c31a4e 100644 --- a/src/nlr/LPFormulator.cpp +++ b/src/nlr/LPFormulator.cpp @@ -347,7 +347,7 @@ void LPFormulator::optimizeBoundsWithPreimageApproximation( const Map>(); + auto opt_params = gradient_descent_parameters>(); unsigned depth = layers.size(); opt_params.lower_bounds.resize( depth, 0 ); opt_params.upper_bounds.resize( depth, 1 ); @@ -356,10 +356,16 @@ void LPFormulator::optimizeBoundsWithPreimageApproximation( const MapcomputeParameterisedSymbolicBounds( optimal_coeffs[i], true ); + { + std::cout << "optimal_coeffs[" << i << "] = " << optimal_coeffs[i] << std::endl; + if ( i == layers.size() - 1 ) + layers[i]->computeParameterisedSymbolicBounds( optimal_coeffs[i], true ); + else + layers[i]->computeParameterisedSymbolicBounds( optimal_coeffs[i] ); + } optimizeBoundsWithLpRelaxation( layers, true, optimal_coeffs ); } @@ -372,50 +378,57 @@ double LPFormulator::EstimateVolume( const Map &layers, layers[i]->computeParameterisedSymbolicBounds( coeffs[i] ); std::mt19937_64 rng( GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED ); - double average = 0; + double log_box_volume = 0; + double sigmoid_sum = 0; + + unsigned inputLayerIndex = 0; + unsigned outputLayerIndex = layers.size() - 1; + const Layer *inputLayer = layers[inputLayerIndex]; + const Layer *outputLayer = layers[outputLayerIndex]; + + // Calculate volume of input variables' bounding box. + for ( unsigned index = 0; index < inputLayer->getSize(); ++index ) + { + if ( inputLayer->neuronEliminated( index ) ) + continue; + + double lb = inputLayer->getLb( index ); + double ub = inputLayer->getUb( index ); + + log_box_volume += std::log( ub - lb ); + } + for ( unsigned i = 0; i < GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; ++i ) { - // Sample point from known bounds. + // Sample input point from known bounds. 
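+        // Summary of the estimator (editor's note, derived from the return statement
+        // below): the function returns
+        //   exp( log_box_volume ) * ( sigmoid_sum / VOLUME_ESTIMATION_ITERATIONS ),
+        // i.e. the volume of the input bounding box scaled by the average sigmoid
+        // of each sample's maximal margin from the output layer's symbolic bounds.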
Map point; - for ( auto pair : layers ) + for ( unsigned index = 0; index < inputLayer->getSize(); ++index ) { - unsigned layerIndex = pair.first; - const Layer *layer = pair.second; - for ( unsigned index = 0; index < layer->getSize(); ++index ) - { - if ( layer->neuronEliminated( index ) ) - continue; + if ( inputLayer->neuronEliminated( index ) ) + continue; - const NeuronIndex neuron( layerIndex, index ); - double lb = layer->getLb( index ); - double ub = layer->getUb( index ); - std::uniform_real_distribution<> dis( lb, ub ); - point.insert( neuron, dis( rng ) ); - } + NeuronIndex neuron( inputLayerIndex, index ); + double lb = inputLayer->getLb( index ); + double ub = inputLayer->getUb( index ); + std::uniform_real_distribution<> dis( lb, ub ); + point.insert( neuron, dis( rng ) ); } - // Compute the average sigmoid of log-sum-exp of points' difference from bounding - // hyperplanes. - double sum_exp = 0; - for ( auto pair : layers ) + // Calculate sigmoid of maximum margin from output symbolic bounds. + double max_margin = 0; + for ( unsigned index = 0; index < outputLayer->getSize(); ++index ) { - const Layer *layer = pair.second; - for ( unsigned index = 0; index < layer->getSize(); ++index ) - { - if ( layer->neuronEliminated( index ) ) - continue; + if ( outputLayer->neuronEliminated( index ) ) + continue; - double margin = layer->calculateDifferenceFromSymbolic( point, index ); - if ( FloatUtils::wellFormed( std::exp( margin ) ) ) - sum_exp += std::exp( margin ); - } + double margin = outputLayer->calculateDifferenceFromSymbolic( point, index ); + max_margin = std::max( max_margin, margin ); } - - double slse = 1 / ( 1 + ( 1 / sum_exp ) ); - average += slse / GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; + sigmoid_sum += SigmoidConstraint::sigmoid( max_margin ); } - return average; + return std::exp( log_box_volume + std::log( sigmoid_sum ) ) / + GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; } void LPFormulator::optimizeBoundsOfOneLayerWithLpRelaxation( const Map &layers, @@ -1808,6 +1821,10 @@ void LPFormulator::addLayerToParameterisedModel( GurobiWrapper &gurobi, addSignLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); break; + case Layer::BILINEAR: + addBilinearLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); + break; + default: addLayerToModel( gurobi, layer, createVariables ); break; @@ -1872,9 +1889,10 @@ void LPFormulator::addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurob /* The phase of this ReLU is not yet fixed. - For y = ReLU(x), we add the following triangular relaxation: + For y = ReLU(x), we add the following relaxation: 1. y >= 0 + 2. y >= x 2. y >= coeff * x 3. y is below the line the crosses (x.lb,0) and (x.ub,x.ub) */ @@ -1884,7 +1902,13 @@ void LPFormulator::addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurob terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); gurobi.addGeqConstraint( terms, 0 ); - // y >= coeff * x, i.e. y - coeff * x >= 0 (varies continuously between y >= 0 and + // y >= x, i.e. y - x >= 0. + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + + // y >= coeff * x, i.e. y - coeff * x >= 0 (varies continuously between y >= 0 and // y >= alpha * x). 
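+            // Editor's worked example (illustrative): for x in [ -1, 2 ] and coeff = 0.5,
+            // this relaxation keeps y >= 0, y >= x and y >= 0.5 * x, while the upper
+            // bound added below is the chord y <= ( 2 / 3 ) * ( x + 1 ) through
+            // ( -1, 0 ) and ( 2, 2 ).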
terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); @@ -1969,9 +1993,9 @@ void LPFormulator::addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurob /* 2 - y <= ----- * (1 - coeff) x + 1 (varies continuously between y <= 1 and y <= -2 / l * - x + 1). + y <= ----- * (1 - coeff) x + 1 - l + Varies continuously between y <= 1 and y <= -2/l * x + 1. */ List terms; terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); @@ -1981,9 +2005,9 @@ void LPFormulator::addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurob /* 2 - y >= ----- * (1 - coeff) x - 1 (varies continuously between y >= -1 and y >= 2 / u * - x - 1). + y >= ----- * (1 - coeff) x - 1 u + Varies continuously between y >= -1 and y >= 2/u * x - 1. */ terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); @@ -2057,11 +2081,12 @@ void LPFormulator::addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper & /* The phase of this LeakyReLU is not yet fixed. - For y = LeakyReLU(x), we add the following triangular relaxation: + For y = LeakyReLU(x), we add the following relaxation: 1. y >= ((1 - alpha) * coeff + alpha) * x (varies continuously between y >= alpha * x and y >= x). 2. y >= x - 3. y is below the line the crosses (x.lb,0) and (x.ub,x.ub) + 3. y >= alpha * x + 4. y is below the line the crosses (x.lb,0) and (x.ub,x.ub) */ // y >= ((1 - alpha) * coeff + alpha) * x @@ -2077,6 +2102,12 @@ void LPFormulator::addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper & terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); gurobi.addGeqConstraint( terms, 0 ); + // y >= alpha * x, i.e. y - alpha * x >= 0 + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -slope, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( -weight, Stringf( "x%u", sourceVariable ) ) ); @@ -2086,6 +2117,122 @@ void LPFormulator::addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper & } } +void LPFormulator::addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( !layer->neuronEliminated( i ) ) + { + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + + Vector sourceLbs; + Vector sourceUbs; + Vector sourceValues; + Vector sourceNeurons; + bool allConstant = true; + for ( const auto &sourceIndex : sources ) + { + unsigned sourceNeuron = sourceIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeuron ) ); + + sourceNeurons.append( sourceNeuron ); + sourceLbs.append( sourceLb ); + sourceUbs.append( sourceUb ); + + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) + { + allConstant = false; + } + else + { + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + sourceValues.append( sourceValue ); + } + } + + if ( allConstant ) + 
{
+                // If both source neurons have been eliminated, this neuron is constant
+                double targetValue = sourceValues[0] * sourceValues[1];
+                gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue );
+                continue;
+            }
+
+            double lb = FloatUtils::infinity();
+            double ub = FloatUtils::negativeInfinity();
+            List<double> values = { sourceLbs[0] * sourceLbs[1],
+                                    sourceLbs[0] * sourceUbs[1],
+                                    sourceUbs[0] * sourceLbs[1],
+                                    sourceUbs[0] * sourceUbs[1] };
+            for ( const auto &v : values )
+            {
+                if ( v < lb )
+                    lb = v;
+                if ( v > ub )
+                    ub = v;
+            }
+
+            gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub );
+
+            // Bilinear linear relaxation (arXiv:2405.21063v2 [cs.LG])
+            // Lower bound: out >= a_l * x + b_l * y + c_l, where
+            // a_l = alpha1 * l_y + ( 1 - alpha1 ) * u_y
+            // b_l = alpha1 * l_x + ( 1 - alpha1 ) * u_x
+            // c_l = -alpha1 * l_x * l_y - ( 1 - alpha1 ) * u_x * u_y
+
+            // Upper bound: out <= a_u * x + b_u * y + c_u, where
+            // a_u = alpha2 * u_y + ( 1 - alpha2 ) * l_y
+            // b_u = alpha2 * l_x + ( 1 - alpha2 ) * u_x
+            // c_u = -alpha2 * l_x * u_y - ( 1 - alpha2 ) * u_x * l_y
+
+            // Here assume alpha1 = alpha2 = coeff (and b_l = b_u).
+            List<GurobiWrapper::Term> terms;
+            terms.clear();
+            terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+            terms.append( GurobiWrapper::Term(
+                -coeff * sourceLbs[1] - ( 1 - coeff ) * sourceUbs[1],
+                Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) );
+            terms.append( GurobiWrapper::Term(
+                -coeff * sourceLbs[0] - ( 1 - coeff ) * sourceUbs[0],
+                Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) );
+            gurobi.addGeqConstraint( terms,
+                                     -coeff * sourceLbs[0] * sourceLbs[1] -
+                                         ( 1 - coeff ) * sourceUbs[0] * sourceUbs[1] );
+
+            terms.clear();
+            terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+            terms.append( GurobiWrapper::Term(
+                -coeff * sourceUbs[1] - ( 1 - coeff ) * sourceLbs[1],
+                Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) );
+            terms.append( GurobiWrapper::Term(
+                -coeff * sourceLbs[0] - ( 1 - coeff ) * sourceUbs[0],
+                Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) );
+            gurobi.addLeqConstraint( terms,
+                                     -coeff * sourceLbs[0] * sourceUbs[1] -
+                                         ( 1 - coeff ) * sourceUbs[0] * sourceLbs[1] );
+        }
+    }
+}
+
+void LPFormulator::addSigmoidLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi,
+                                                               const Layer *layer,
+                                                               bool createVariables,
+                                                               double coeff )
+{
+}
+
 void LPFormulator::setCutoff( double cutoff )
 {
     _cutoffInUse = true;
diff --git a/src/nlr/LPFormulator.h b/src/nlr/LPFormulator.h
index f1a5849004..cdfca2e81d 100644
--- a/src/nlr/LPFormulator.h
+++ b/src/nlr/LPFormulator.h
@@ -16,7 +16,7 @@
 #ifndef __LPFormulator_h__
 #define __LPFormulator_h__
 
-#include "AdamOptimizer.h"
+#include "GradientDescent.h"
 #include "GurobiWrapper.h"
 #include "LayerOwner.h"
 #include "Map.h"
@@ -160,6 +160,16 @@ class LPFormulator : public ParallelSolver
                                             bool createVariables,
                                             double coeff );
 
+    void addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi,
+                                                      const Layer *layer,
+                                                      bool createVariables,
+                                                      double coeff );
+
+    void addSigmoidLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi,
+                                                     const Layer *layer,
+                                                     bool createVariables,
+                                                     double coeff );
+
+    // Estimate Volume of parameterised LP relaxations.
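+    // Editor's note: coeffs holds one parameter per layer in [ 0, 1 ] (see
+    // optimizeBoundsWithPreimageApproximation above), and the estimate scores how
+    // much of the input box the parameterised relaxation keeps feasible.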
static double EstimateVolume( const Map &layers, diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index b53777488e..cfb9749014 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -3394,19 +3394,17 @@ void Layer::computeSymbolicBoundsForWeightedSum() double Layer::calculateDifferenceFromSymbolic( Map &point, unsigned i ) const { - double lower_sum = _symbolicLowerBias[i]; - double upper_sum = _symbolicUpperBias[i]; + double lowerSum = _symbolicLowerBias[i]; + double upperSum = _symbolicUpperBias[i]; for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - const NeuronIndex neuron( 0, j ); - lower_sum += _symbolicLb[j * _size + i] * point[neuron]; - upper_sum += _symbolicUb[j * _size + i] * point[neuron]; + NeuronIndex neuron( 0, j ); + lowerSum += _symbolicLb[j * _size + i] * point[neuron]; + upperSum += _symbolicUb[j * _size + i] * point[neuron]; } - const NeuronIndex currentNeuron( _layerIndex, i ); - return FloatUtils::max( point[currentNeuron] - upper_sum, 0 ) + - FloatUtils::max( lower_sum - point[currentNeuron], 0 ); + return std::max( _ub[i] - upperSum, lowerSum - _lb[i] ); } void Layer::computeParameterisedSymbolicBounds( double coeff, bool receive ) @@ -3425,6 +3423,10 @@ void Layer::computeParameterisedSymbolicBounds( double coeff, bool receive ) computeParameterisedSymbolicBoundsForLeakyRelu( coeff ); break; + case BILINEAR: + computeParameterisedSymbolicBoundsForBilinear( coeff ); + break; + default: computeSymbolicBounds(); break; @@ -3940,28 +3942,19 @@ void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( double coeff ) // use the heuristic described in section 4.1 of // https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf // to set the value of lambda (either 0 or 1 is considered). - if ( sourceUb > sourceLb ) - { - // lambda = 1 - // Symbolic lower bound: x_f >= x_b - // Concrete lower bound: x_f >= sourceLb - // Lower bounds are passed as is - } - else - { - // lambda = ((1 - alpha) * coeff + alpha) (varies continuously between lambda = - // alpha and lambda = 1). Symbolic lower bound: x_f >= ((1 - coeff) * weight + - // alpha) x_b Concrete lower bound: x_f >= 0 + // lambda = ((1 - alpha) * coeff + alpha) (varies continuously between lambda = + // alpha and lambda = 1). Symbolic lower bound: x_f >= ((1 - alpha) * coeff + + // alpha) x_b Concrete lower bound: x_f >= 0 - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _symbolicLb[j * _size + i] *= ( 1 - _alpha ) * coeff + _alpha; - } - - _symbolicLowerBias[i] *= _alpha; + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] *= ( 1 - _alpha ) * coeff + _alpha; } + + _symbolicLowerBias[i] *= ( 1 - _alpha ) * coeff + _alpha; } + else { for ( unsigned j = 0; j < _inputLayerSize; ++j ) @@ -3973,19 +3966,16 @@ void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( double coeff ) _symbolicLowerBias[i] *= weight; _symbolicLowerBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; - if ( sourceUb > sourceLb ) + // lambda = ((1 - alpha) * coeff + alpha) (varies continuously between lambda = + // alpha and lambda = 1). 
Symbolic lower bound: x_f >= ((1 - alpha) * coeff +
+            // alpha) x_b. Concrete lower bound: x_f >= 0
+
+            for ( unsigned j = 0; j < _inputLayerSize; ++j )
             {
-                // Upper bounds are passed as is
+                _symbolicUb[j * _size + i] *= ( 1 - _alpha ) * coeff + _alpha;
             }
-            else
-            {
-                for ( unsigned j = 0; j < _inputLayerSize; ++j )
-                {
-                    _symbolicUb[j * _size + i] *= _alpha;
-                }
-
-                _symbolicUpperBias[i] *= _alpha;
-            }
+            _symbolicUpperBias[i] *= ( 1 - _alpha ) * coeff + _alpha;
         }
 
         /*
@@ -4083,6 +4073,233 @@
 }
 }
 
+void Layer::computeParameterisedSymbolicBoundsForBilinear( double coeff )
+{
+    std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 );
+    std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 );
+
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+        {
+            _symbolicLowerBias[i] = _eliminatedNeurons[i];
+            _symbolicUpperBias[i] = _eliminatedNeurons[i];
+
+            _symbolicLbOfLb[i] = _eliminatedNeurons[i];
+            _symbolicUbOfLb[i] = _eliminatedNeurons[i];
+            _symbolicLbOfUb[i] = _eliminatedNeurons[i];
+            _symbolicUbOfUb[i] = _eliminatedNeurons[i];
+        }
+    }
+
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+            continue;
+
+        ASSERT( _neuronToActivationSources.exists( i ) );
+        List<NeuronIndex> sources = getActivationSources( i );
+        ASSERT( sources.size() == 2 );
+
+        const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer );
+
+        unsigned sourceLayerSize = sourceLayer->getSize();
+        const double *sourceSymbolicLb = sourceLayer->getSymbolicLb();
+        const double *sourceSymbolicUb = sourceLayer->getSymbolicUb();
+
+        Vector<double> sourceLbs;
+        Vector<double> sourceUbs;
+        Vector<double> sourceValues;
+        bool allConstant = true;
+        unsigned indexA = 0;
+        unsigned indexB = 0;
+        unsigned counter = 0;
+        for ( const auto &sourceIndex : sources )
+        {
+            unsigned sourceNeuron = sourceIndex._neuron;
+            double sourceLb = sourceLayer->getLb( sourceNeuron );
+            double sourceUb = sourceLayer->getUb( sourceNeuron );
+
+            sourceLbs.append( sourceLb );
+            sourceUbs.append( sourceUb );
+
+            if ( !sourceLayer->neuronEliminated( sourceNeuron ) )
+            {
+                allConstant = false;
+            }
+            else
+            {
+                double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron );
+                sourceValues.append( sourceValue );
+            }
+
+            if ( counter == 0 )
+            {
+                indexA = sourceIndex._neuron;
+            }
+            else
+            {
+                indexB = sourceIndex._neuron;
+            }
+            ++counter;
+        }
+
+        if ( allConstant )
+        {
+            // If both source neurons have been eliminated, this neuron is constant
+            for ( unsigned j = 0; j < _inputLayerSize; ++j )
+            {
+                _symbolicUb[j * _size + i] = 0;
+                _symbolicLb[j * _size + i] = 0;
+            }
+
+            _symbolicUpperBias[i] = sourceValues[0] * sourceValues[1];
+            _symbolicLowerBias[i] = sourceValues[0] * sourceValues[1];
+            continue;
+        }
+
+        for ( unsigned j = 0; j < _inputLayerSize; ++j )
+        {
+            _symbolicUb[j * _size + i] = 0;
+            _symbolicLb[j * _size + i] = 0;
+        }
+
+        // Bilinear linear relaxation (arXiv:2405.21063v2 [cs.LG])
+        // Lower bound: out >= a_l * x + b_l * y + c_l, where
+        // a_l = alpha1 * l_y + ( 1 - alpha1 ) * u_y
+        // b_l = alpha1 * l_x + ( 1 - alpha1 ) * u_x
+        // c_l = -alpha1 * l_x * l_y - ( 1 - alpha1 ) * u_x * u_y
+
+        // Upper bound: out <= a_u * x + b_u * y + c_u, where
+        // a_u = alpha2 * u_y + ( 1 - alpha2 ) * l_y
+        // b_u = alpha2 * l_x + ( 1 - alpha2 ) * u_x
+        // c_u = -alpha2 * l_x * u_y - ( 1 - alpha2 ) * u_x * l_y
+
+        // Here assume alpha1 = alpha2 = coeff (and b_l = b_u).
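+        // Editor's worked example (illustrative): x in [ -1, 2 ], y in [ 0, 3 ] and
+        // coeff = 0.5 give a_l = a_u = 1.5, b = 0.5, c_l = -3, c_u = 1.5, i.e.
+        //   x * y >= 1.5 * x + 0.5 * y - 3   and   x * y <= 1.5 * x + 0.5 * y + 1.5
+        // over the box; each is the average of the two corresponding McCormick planes.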
+
+        double a_l = coeff * sourceLbs[1] + ( 1 - coeff ) * sourceUbs[1];
+        double a_u = coeff * sourceUbs[1] + ( 1 - coeff ) * sourceLbs[1];
+        double b = coeff * sourceLbs[0] + ( 1 - coeff ) * sourceUbs[0];
+
+        for ( unsigned j = 0; j < _inputLayerSize; ++j )
+        {
+            if ( a_l >= 0 )
+            {
+                _symbolicLb[j * _size + i] += a_l * sourceSymbolicLb[j * sourceLayerSize + indexA];
+            }
+            else
+            {
+                _symbolicLb[j * _size + i] += a_l * sourceSymbolicUb[j * sourceLayerSize + indexA];
+            }
+
+            if ( a_u >= 0 )
+            {
+                _symbolicUb[j * _size + i] += a_u * sourceSymbolicUb[j * sourceLayerSize + indexA];
+            }
+            else
+            {
+                _symbolicUb[j * _size + i] += a_u * sourceSymbolicLb[j * sourceLayerSize + indexA];
+            }
+
+            if ( b >= 0 )
+            {
+                _symbolicLb[j * _size + i] += b * sourceSymbolicLb[j * sourceLayerSize + indexB];
+                _symbolicUb[j * _size + i] += b * sourceSymbolicUb[j * sourceLayerSize + indexB];
+            }
+            else
+            {
+                _symbolicLb[j * _size + i] += b * sourceSymbolicUb[j * sourceLayerSize + indexB];
+                _symbolicUb[j * _size + i] += b * sourceSymbolicLb[j * sourceLayerSize + indexB];
+            }
+        }
+
+        _symbolicLowerBias[i] =
+            -coeff * sourceLbs[0] * sourceLbs[1] - ( 1 - coeff ) * sourceUbs[0] * sourceUbs[1];
+        _symbolicUpperBias[i] =
+            -coeff * sourceLbs[0] * sourceUbs[1] - ( 1 - coeff ) * sourceUbs[0] * sourceLbs[1];
+
+        double lb = FloatUtils::infinity();
+        double ub = FloatUtils::negativeInfinity();
+        List<double> values = { sourceLbs[0] * sourceLbs[1],
+                                sourceLbs[0] * sourceUbs[1],
+                                sourceUbs[0] * sourceLbs[1],
+                                sourceUbs[0] * sourceUbs[1] };
+        for ( const auto &v : values )
+        {
+            if ( v < lb )
+                lb = v;
+            if ( v > ub )
+                ub = v;
+        }
+
+        /*
+          We now have the symbolic representation for the current
+          layer. Next, we compute new lower and upper bounds for
+          it. For each of these bounds, we compute an upper bound and
+          a lower bound.
+        */
+        _symbolicLbOfLb[i] = _symbolicLowerBias[i];
+        _symbolicUbOfLb[i] = _symbolicLowerBias[i];
+        _symbolicLbOfUb[i] = _symbolicUpperBias[i];
+        _symbolicUbOfUb[i] = _symbolicUpperBias[i];
+
+        for ( unsigned j = 0; j < _inputLayerSize; ++j )
+        {
+            double inputLb = _layerOwner->getLayer( 0 )->getLb( j );
+            double inputUb = _layerOwner->getLayer( 0 )->getUb( j );
+
+            double entry = _symbolicLb[j * _size + i];
+
+            if ( entry >= 0 )
+            {
+                _symbolicLbOfLb[i] += ( entry * inputLb );
+                _symbolicUbOfLb[i] += ( entry * inputUb );
+            }
+            else
+            {
+                _symbolicLbOfLb[i] += ( entry * inputUb );
+                _symbolicUbOfLb[i] += ( entry * inputLb );
+            }
+
+            entry = _symbolicUb[j * _size + i];
+
+            if ( entry >= 0 )
+            {
+                _symbolicLbOfUb[i] += ( entry * inputLb );
+                _symbolicUbOfUb[i] += ( entry * inputUb );
+            }
+            else
+            {
+                _symbolicLbOfUb[i] += ( entry * inputUb );
+                _symbolicUbOfUb[i] += ( entry * inputLb );
+            }
+        }
+
+        /*
+          We now have the tightest bounds we can for the bilinear
+          neuron. If they are tighter than what was previously
+          known, store them.
+ */ + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } +} + +void Layer::computeParameterisedSymbolicBoundsForSigmoid( double coeff ) +{ +} + double Layer::softmaxLSELowerBound( const Vector &inputs, const Vector &inputLbs, const Vector &inputUbs, diff --git a/src/nlr/Layer.h b/src/nlr/Layer.h index ee9e1aba05..c73cdb3e18 100644 --- a/src/nlr/Layer.h +++ b/src/nlr/Layer.h @@ -301,6 +301,8 @@ class Layer void computeParameterisedSymbolicBoundsForRelu( double coeff ); void computeParameterisedSymbolicBoundsForSign( double coeff ); void computeParameterisedSymbolicBoundsForLeakyRelu( double coeff ); + void computeParameterisedSymbolicBoundsForBilinear( double coeff ); + void computeParameterisedSymbolicBoundsForSigmoid( double coeff ); /* Helper functions for interval bound tightening From 927302e5fa13c8910066e935bae9a2412034e666 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 9 Jan 2025 20:54:41 +0200 Subject: [PATCH 41/81] Add files via upload --- src/nlr/tests/Test_NetworkLevelReasoner.h | 1332 ++++++++++++++++++++- 1 file changed, 1288 insertions(+), 44 deletions(-) diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h index 977faa4ed6..91bd047687 100644 --- a/src/nlr/tests/Test_NetworkLevelReasoner.h +++ b/src/nlr/tests/Test_NetworkLevelReasoner.h @@ -19,6 +19,7 @@ #include "Layer.h" #include "NetworkLevelReasoner.h" #include "Options.h" +#include "PolygonalTightening.h" #include "Query.h" #include "Tightening.h" #include "Vector.h" @@ -1936,7 +1937,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite { /* - 1 R -1 R -1 2 + 1 R -1 R -1 x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 \ / \ / \ / 1 \ / 2 \ / 1 \ / @@ -1945,7 +1946,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -1 / \ 1 / \ -1 / \ / \ R / \ R / \ x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 + 1 -1 2 2 The example described in Fig. 2 of https://dl.acm.org/doi/10.1145/3563325 @@ -2217,7 +2218,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite { /* - 1 S -1 S -1 2 + 1 S -1 S -1 x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 \ / \ / \ / 1 \ / 2 \ / 1 \ / @@ -2226,7 +2227,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -1 / \ 1 / \ -1 / \ / \ S / \ S / \ x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 + 1 -1 2 2 The example described in Fig. 2 of https://dl.acm.org/doi/10.1145/3563325 @@ -2495,7 +2496,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite { /* - 1 Sign -1 Sign -1 2 + 1 Sign -1 Sign -1 x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 \ / \ / \ / 1 \ / 2 \ / 1 \ / @@ -2504,7 +2505,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -1 / \ 1 / \ -1 / \ / \ Sign / \ Sign / \ x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 + 1 -1 2 2 The example described in Fig. 
2 of https://dl.acm.org/doi/10.1145/3563325 @@ -2773,7 +2774,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite { /* - 1 Rnd -1 Rnd -1 2 + 1 Rnd -1 Rnd -1 x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 \ / \ / \ / 1 \ / 2 \ / 1 \ / @@ -2782,7 +2783,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -1 / \ 1 / \ -1 / \ / \ Rnd / \ Rnd / \ x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 + 1 -1 2 2 The example described in Fig. 2 of https://dl.acm.org/doi/10.1145/3563325 @@ -3051,7 +3052,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite { /* - 1 A -1 A -1 2 + 1 A -1 A -1 x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 \ / \ / \ / 1 \ / 2 \ / 1 \ / @@ -3060,7 +3061,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -1 / \ 1 / \ -1 / \ / \ A / \ A / \ x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 + 1 -1 2 2 The example described in Fig. 2 of https://dl.acm.org/doi/10.1145/3563325 @@ -3329,7 +3330,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite { /* - 1 LR -1 LR -1 2 + 1 LR -1 LR -1 x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 \ / \ / \ / 1 \ / 2 \ / 1 \ / @@ -3338,7 +3339,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -1 / \ 1 / \ -1 / \ / \ LR / \ LR / \ x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 + 1 -1 2 2 The example described in Fig. 2 of https://dl.acm.org/doi/10.1145/3563325 @@ -12164,6 +12165,383 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } + void test_preimage_approximation_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ), + Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + // From parameterised SBT: + Tightening( 2, -1, Tightening::LB ), + Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), + Tightening( 3, 2, 
Tightening::UB ), + + Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.5, Tightening::LB ), + Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), + Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, 0.38, Tightening::UB ), + + // From parameterised LP: + Tightening( 8, 0, Tightening::LB ), + Tightening( 10, -2, Tightening::LB ), + Tightening( 10, 0, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( auto &bound : bounds ) + { + bound.dump(); + } + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ), + + Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + // From parameterised SBT: + Tightening( 2, -5, Tightening::LB ), + Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), + Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 3, Tightening::UB ), + + Tightening( 7, 4, Tightening::UB ), + Tightening( 8, 2, Tightening::UB ), + + Tightening( 9, 4, Tightening::UB ), + Tightening( 10, 0.38, Tightening::UB ), + + // From parameterised LP: + Tightening( 8, 0, Tightening::LB ), + Tightening( 9, 0, Tightening::LB ), + + 
Tightening( 11, -0.93, Tightening::LB ), + Tightening( 10, 2, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( auto &bound : bounds ) + { + bound.dump(); + } + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_relu2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ), + + Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ), + Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ), + + Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ), + + Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ), + + Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), 
Tightening( 10, 0.25, Tightening::UB ), + Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( auto &bound : bounds ) + { + bound.dump(); + } + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ), + Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ), + Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ), + + Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ), + Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ), + + 
Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ), + Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ), + + Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), + Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( auto &bound : bounds ) + { + bound.dump(); + } + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + void test_preimage_approximation_leaky_relu() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); @@ -12173,7 +12551,685 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); - populateNetworkBackwardLeakyReLU( nlr, tableau ); + populateNetworkBackwardLeakyReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + 
Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), + Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( auto &bound : bounds ) + { + bound.dump(); + } + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), + Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + + Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), + Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + + Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), + Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), 
Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), + Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( auto &bound : bounds ) + { + bound.dump(); + } + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_leaky_relu2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyRelu2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), + Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( auto &bound : bounds ) + { + bound.dump(); + } + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + 
tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), + Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + + Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), + Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + + Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), + Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), + Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( auto &bound : bounds ) + { + bound.dump(); + } + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_sign() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + 
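+        // Note: "backward-preimage-approx" makes lpRelaxationPropagation() run
+        // the PreimageApproximation pass, i.e. optimise the per-layer relaxation
+        // coefficients (LPFormulator::optimizeBoundsWithPreimageApproximation,
+        // added later in this series) before the parameterised LP tightening.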
+ NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), + Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( auto &bound : bounds ) + { + bound.dump(); + } + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + 
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), + Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( auto &bound : bounds ) + { + bound.dump(); + } + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_sign2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, 
Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), + Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( auto &bound : bounds ) + { + bound.dump(); + } + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, 
-large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), + Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( auto &bound : bounds ) + { + bound.dump(); + } + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_relu_and_bilinear() + { + 
Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReluAndBilinear( nlr, tableau ); tableau.setLowerBound( 0, 0 ); tableau.setUpperBound( 0, 1 ); @@ -12186,20 +13242,20 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ), - Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ), - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ), } ); List bounds; @@ -12212,29 +13268,29 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - /*List expectedBounds2( - { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2.1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), + Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), 
+ Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.35, Tightening::UB ), - Tightening( 11, 1.35, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), - } ); + Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), + } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); for ( auto &bound : bounds ) { bound.dump(); } - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );*/ + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); // Change the current bounds @@ -12291,21 +13347,209 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - // Invoke backward LP propagation + // Invoke PreimageApproximation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - /*List expectedBounds4( - { Tightening( 4, -0.5, Tightening::LB ), - Tightening( 5, -0.4, Tightening::LB ), + List expectedBounds4( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 8, -0.4571, Tightening::LB ), - Tightening( 9, -1.1057, Tightening::LB ), - } ); + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), + Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( auto &bound : bounds ) + { + bound.dump(); + } + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_relu_and_bilinear2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReluAndBilinear2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + 
Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ), + Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ), + + Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), + Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( auto &bound : bounds ) + { + bound.dump(); + } + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + 
Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ), + Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ), + + Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), + Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );*/ + for ( auto &bound : bounds ) + { + bound.dump(); + } + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } bool boundsEqual( const List &bounds, const List &expectedBounds ) From 73ce3434e6a09640cf74c50027e37ac19d644faa Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 9 Jan 2025 20:55:25 +0200 Subject: [PATCH 42/81] Add files via upload --- src/engine/PolygonalTightening.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/engine/PolygonalTightening.h b/src/engine/PolygonalTightening.h index 0f811f6fe6..99dabafa85 100644 --- a/src/engine/PolygonalTightening.h +++ b/src/engine/PolygonalTightening.h @@ -71,7 +71,8 @@ class PolygonalTightening } allFound &= currentFound; } - bool result = allFound && _value == other._value && _type == other._type; + bool result = allFound && _value == other._value && _type == other._type && + _neuronToCoefficient.size() == other._neuronToCoefficient.size(); return result; } From 714b4e4831471361085efb84f37ba3eaa1fb4153 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 9 Jan 2025 20:56:14 +0200 Subject: [PATCH 43/81] Add files via upload --- src/configuration/GlobalConfiguration.cpp | 8 ++++---- src/configuration/GlobalConfiguration.h | 3 +++ 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/src/configuration/GlobalConfiguration.cpp b/src/configuration/GlobalConfiguration.cpp index 3a08b92234..a5d5100011 100644 --- a/src/configuration/GlobalConfiguration.cpp +++ b/src/configuration/GlobalConfiguration.cpp @@ -69,10 +69,10 @@ const 
double GlobalConfiguration::COST_FUNCTION_ERROR_THRESHOLD = 0.0000000001; const unsigned GlobalConfiguration::SIMULATION_RANDOM_SEED = 1; const unsigned GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED = 1; const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED = 1; -const unsigned GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS = 1000; -const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS = 100; -const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE = 0.1; -const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE = 0.05; +const unsigned GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS = 25000; +const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS = 25; +const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE = 0.025; +const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE = 0.25; const bool GlobalConfiguration::USE_HARRIS_RATIO_TEST = true; diff --git a/src/configuration/GlobalConfiguration.h b/src/configuration/GlobalConfiguration.h index 78ab9f662e..92dcfdd049 100644 --- a/src/configuration/GlobalConfiguration.h +++ b/src/configuration/GlobalConfiguration.h @@ -166,6 +166,9 @@ class GlobalConfiguration // Step size for PreimageApproximation optimization. static const double PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; + // Learning rate for PreimageApproximation optimization. + static const double PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE; + // How often should projected steepest edge reset the reference space? static const unsigned PSE_ITERATIONS_BEFORE_RESET; From 061bb5705101a6eb500233368fe09d4d01869b4f Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 16 Jan 2025 20:32:08 +0200 Subject: [PATCH 44/81] Add files via upload --- src/nlr/LPFormulator.cpp | 174 +++++++++++++++++++++++-------- src/nlr/LPFormulator.h | 27 +++-- src/nlr/Layer.cpp | 137 +++++++++++++++--------- src/nlr/Layer.h | 13 +-- src/nlr/NetworkLevelReasoner.cpp | 13 ++- src/nlr/NetworkLevelReasoner.h | 2 +- 6 files changed, 249 insertions(+), 117 deletions(-) diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp index 31e5c31a4e..c9ff9eb6d6 100644 --- a/src/nlr/LPFormulator.cpp +++ b/src/nlr/LPFormulator.cpp @@ -346,11 +346,11 @@ void LPFormulator::optimizeBoundsWithPreimageApproximation( const Map>(); - unsigned depth = layers.size(); - opt_params.lower_bounds.resize( depth, 0 ); - opt_params.upper_bounds.resize( depth, 1 ); + unsigned num = getNumberOfParameters( layers ); + opt_params.lower_bounds.resize( num, 0 ); + opt_params.upper_bounds.resize( num, 1 ); double value_to_reach = 0; std::mt19937_64 rng( GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED ); @@ -358,15 +358,28 @@ void LPFormulator::optimizeBoundsWithPreimageApproximation( const Map> layerIndicesToParameters = + getParametersForLayers( layers, optimal_coeffs ); + for ( auto pair : layers ) { - std::cout << "optimal_coeffs[" << i << "] = " << optimal_coeffs[i] << std::endl; + unsigned layerIndex = pair.first; + Layer *layer = layers[layerIndex]; + std::vector currentLayerCoeffs = layerIndicesToParameters[layerIndex]; if ( i == layers.size() - 1 ) - layers[i]->computeParameterisedSymbolicBounds( optimal_coeffs[i], true ); + { + layer->computeParameterisedSymbolicBounds( currentLayerCoeffs, true, false ); + } else - layers[i]->computeParameterisedSymbolicBounds( optimal_coeffs[i] ); + { + 
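+            // Intermediate layers only propagate the parameterised symbolic
+            // bounds; polygonal tightenings are collected solely from the final
+            // layer, via the receivePolygonal flag in the branch above.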
layer->computeParameterisedSymbolicBounds( currentLayerCoeffs ); + } + i++; } + // Finally, run parameterised forward LP and parameterised backward LP. + optimizeBoundsWithLpRelaxation( layers, false, optimal_coeffs ); optimizeBoundsWithLpRelaxation( layers, true, optimal_coeffs ); } @@ -374,8 +387,15 @@ double LPFormulator::EstimateVolume( const Map &layers, std::vector coeffs ) { // First, run parameterised symbolic bound propagation. - for ( unsigned i = 0; i < layers.size(); ++i ) - layers[i]->computeParameterisedSymbolicBounds( coeffs[i] ); + Map> layerIndicesToParameters = + getParametersForLayers( layers, coeffs ); + for ( auto pair : layers ) + { + unsigned layerIndex = pair.first; + Layer *layer = layers[layerIndex]; + std::vector currentLayerCoeffs = layerIndicesToParameters[layerIndex]; + layer->computeParameterisedSymbolicBounds( currentLayerCoeffs ); + } std::mt19937_64 rng( GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED ); double log_box_volume = 0; @@ -383,8 +403,8 @@ double LPFormulator::EstimateVolume( const Map &layers, unsigned inputLayerIndex = 0; unsigned outputLayerIndex = layers.size() - 1; - const Layer *inputLayer = layers[inputLayerIndex]; - const Layer *outputLayer = layers[outputLayerIndex]; + Layer *inputLayer = layers[inputLayerIndex]; + Layer *outputLayer = layers[outputLayerIndex]; // Calculate volume of input variables' bounding box. for ( unsigned index = 0; index < inputLayer->getSize(); ++index ) @@ -756,8 +776,10 @@ void LPFormulator::createLPRelaxation( const Map &layers, addLayerToModel( gurobi, layer.second, false ); else { - double coeff = coeffs[currentLayerIndex]; - addLayerToParameterisedModel( gurobi, layer.second, false, coeff ); + Map> layerIndicesToParameters = + getParametersForLayers( layers, coeffs ); + std::vector currentLayerCoeffs = layerIndicesToParameters[currentLayerIndex]; + addLayerToParameterisedModel( gurobi, layer.second, false, currentLayerCoeffs ); } } } @@ -787,8 +809,11 @@ void LPFormulator::createLPRelaxationAfter( const Map &layers addLayerToModel( gurobi, currentLayer, true ); else { - double coeff = coeffs[currentLayerIndex]; - addLayerToParameterisedModel( gurobi, currentLayer, true, coeff ); + Map> layerIndicesToParameters = + getParametersForLayers( layers, coeffs ); + std::vector currentLayerCoeffs = + layerIndicesToParameters[currentLayerIndex]; + addLayerToParameterisedModel( gurobi, currentLayer, true, currentLayerCoeffs ); } for ( const auto &nextLayer : currentLayer->getSuccessorLayers() ) @@ -1802,27 +1827,92 @@ void LPFormulator::addLeakyReluLayerToLpRelaxation( GurobiWrapper &gurobi, } } +unsigned LPFormulator::getNumberOfParameters( const Map &layers ) +{ + unsigned index = 0; + for ( auto pair : layers ) + { + unsigned layerIndex = pair.first; + Layer *layer = layers[layerIndex]; + switch ( layer->getLayerType() ) + { + case Layer::RELU: + case Layer::LEAKY_RELU: + index++; + break; + + case Layer::SIGN: + case Layer::BILINEAR: + index += 2; + break; + + default: + break; + } + } + return index; +} + +Map> +LPFormulator::getParametersForLayers( const Map &layers, + std::vector coeffs ) +{ + unsigned index = 0; + Map> layerIndicesToParameters; + for ( auto pair : layers ) + { + unsigned layerIndex = pair.first; + Layer *layer = layers[layerIndex]; + std::vector parameters = {}; + switch ( layer->getLayerType() ) + { + case Layer::RELU: + case Layer::LEAKY_RELU: + { + parameters = std::vector( coeffs.begin() + index, coeffs.begin() + index + 1 ); + layerIndicesToParameters.insert( layerIndex, parameters 
); + index++; + } + break; + + case Layer::SIGN: + case Layer::BILINEAR: + { + parameters = std::vector( coeffs.begin() + index, coeffs.begin() + index + 2 ); + layerIndicesToParameters.insert( layerIndex, parameters ); + index += 2; + } + break; + + default: + layerIndicesToParameters.insert( layerIndex, parameters ); + break; + } + } + return layerIndicesToParameters; +} + void LPFormulator::addLayerToParameterisedModel( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, - double coeff ) + std::vector coeffs ) { switch ( layer->getLayerType() ) { case Layer::RELU: - addReluLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); + addReluLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeffs ); break; case Layer::LEAKY_RELU: - addLeakyReluLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); + addLeakyReluLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeffs ); break; case Layer::SIGN: - addSignLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); + addSignLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeffs ); break; case Layer::BILINEAR: - addBilinearLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); + addBilinearLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeffs ); break; default: @@ -1834,8 +1924,9 @@ void LPFormulator::addLayerToParameterisedModel( GurobiWrapper &gurobi, void LPFormulator::addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, - double coeff ) + std::vector coeffs ) { + double coeff = coeffs[0]; for ( unsigned i = 0; i < layer->getSize(); ++i ) { if ( !layer->neuronEliminated( i ) ) @@ -1934,7 +2025,7 @@ void LPFormulator::addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurob void LPFormulator::addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, - double coeff ) + std::vector coeffs ) { for ( unsigned i = 0; i < layer->getSize(); ++i ) { @@ -1993,25 +2084,25 @@ void LPFormulator::addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurob /* 2 - y <= ----- * (1 - coeff) x + 1 + y <= ----- * coeff[0] * x + 1 - l Varies continuously between y <= 1 and y <= -2/l * x + 1. */ List terms; terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( 2.0 / sourceLb * ( 1 - coeff ), + terms.append( GurobiWrapper::Term( 2.0 / sourceLb * coeffs[0], Stringf( "x%u", sourceVariable ) ) ); gurobi.addLeqConstraint( terms, 1 ); /* 2 - y >= ----- * (1 - coeff) x - 1 + y >= ----- * coeffs[1] * x - 1 u Varies continuously between y >= -1 and y >= 2/u * x - 1. 
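             For instance, with u = 2: coeffs[1] = 0 gives the constant bound
             y >= -1, coeffs[1] = 1 gives the tightest edge y >= x - 1, and
             coeffs[1] = 0.5 gives the intermediate bound y >= 0.5 * x - 1.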
*/ terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -2.0 / sourceUb * ( 1 - coeff ), + terms.append( GurobiWrapper::Term( -2.0 / sourceUb * coeffs[1], Stringf( "x%u", sourceVariable ) ) ); gurobi.addGeqConstraint( terms, -1 ); } @@ -2021,9 +2112,10 @@ void LPFormulator::addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurob void LPFormulator::addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, - double coeff ) + std::vector coeffs ) { double slope = layer->getAlpha(); + double coeff = coeffs[0]; for ( unsigned i = 0; i < layer->getSize(); ++i ) { if ( !layer->neuronEliminated( i ) ) @@ -2120,7 +2212,7 @@ void LPFormulator::addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper & void LPFormulator::addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, - double coeff ) + std::vector coeffs ) { for ( unsigned i = 0; i < layer->getSize(); ++i ) { @@ -2197,42 +2289,34 @@ void LPFormulator::addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &g // b_u = alpha2 * l_x + ( 1 - alpha2 ) * u_x // c_u = -alpha2 * -l_x * u_y - ( 1 - alpha2 ) * u_x * l_y - // Here assume alpha1 = alpha2 = coeff (and b_l = b_u). List terms; terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( - -coeff * sourceLbs[1] - ( 1 - coeff ) * sourceUbs[1], + -coeffs[0] * sourceLbs[1] - ( 1 - coeffs[0] ) * sourceUbs[1], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); terms.append( GurobiWrapper::Term( - -coeff * sourceLbs[0] - ( 1 - coeff ) * sourceUbs[0], + -coeffs[0] * sourceLbs[0] - ( 1 - coeffs[0] ) * sourceUbs[0], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); gurobi.addGeqConstraint( terms, - -coeff * sourceLbs[0] * sourceLbs[1] - - ( 1 - coeff ) * sourceUbs[0] * sourceUbs[1] ); + -coeffs[0] * sourceLbs[0] * sourceLbs[1] - + ( 1 - coeffs[0] ) * sourceUbs[0] * sourceUbs[1] ); terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( - -coeff * sourceUbs[1] - ( 1 - coeff ) * sourceLbs[1], + -coeffs[1] * sourceUbs[1] - ( 1 - coeffs[1] ) * sourceLbs[1], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); terms.append( GurobiWrapper::Term( - -coeff * sourceLbs[0] - ( 1 - coeff ) * sourceUbs[0], + -coeffs[1] * sourceLbs[0] - ( 1 - coeffs[1] ) * sourceUbs[0], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); gurobi.addLeqConstraint( terms, - -coeff * sourceLbs[0] * sourceUbs[1] - - ( 1 - coeff ) * sourceUbs[0] * sourceLbs[1] ); + -coeffs[1] * sourceLbs[0] * sourceUbs[1] - + ( 1 - coeffs[1] ) * sourceUbs[0] * sourceLbs[1] ); } } } -void LPFormulator::addSigmoidLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables, - double coeff ) -{ -} - void LPFormulator::setCutoff( double cutoff ) { _cutoffInUse = true; diff --git a/src/nlr/LPFormulator.h b/src/nlr/LPFormulator.h index cdfca2e81d..c9164f0011 100644 --- a/src/nlr/LPFormulator.h +++ b/src/nlr/LPFormulator.h @@ -91,6 +91,16 @@ class LPFormulator : public ParallelSolver void addLayerToModel( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ); + + // Get total number of optimizable parameters for parameterised SBT/LP relaxation. 
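+    // Counting convention (mirrored by getParametersForLayers below): ReLU and
+    // LeakyRelu layers consume one parameter each, Sign and Bilinear layers
+    // consume two, and all other layer types consume none.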
+ unsigned getNumberOfParameters( const Map &layers ); + + + // Get map containing vector of optimizable parameters for parameterised SBT/LP relaxation for + // every layer index. + static Map> + getParametersForLayers( const Map &layers, std::vector coeffs ); + private: LayerOwner *_layerOwner; bool _cutoffInUse; @@ -139,36 +149,31 @@ class LPFormulator : public ParallelSolver std::vector coeffs = {} ); - // Create LP relaxations depending on an external parameter. + // Create LP relaxations depending on external parameters. void addLayerToParameterisedModel( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, - double coeff ); + std::vector coeffs ); void addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, - double coeff ); + std::vector coeffs ); void addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, - double coeff ); + std::vector coeffs ); void addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, - double coeff ); + std::vector coeffs ); void addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, - double coeff ); - - void addSigmoidLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables, - double coeff ); + std::vector coeffs ); // Estimate Volume of parameterised LP relaxations. diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index cfb9749014..0ebb7872df 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -3122,7 +3122,7 @@ void Layer::computeSymbolicBoundsForBilinear() } else { - _symbolicLb[j * _size + i] += + _symbolicUb[j * _size + i] += sourceUbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA]; } @@ -3407,24 +3407,26 @@ double Layer::calculateDifferenceFromSymbolic( Map &point, return std::max( _ub[i] - upperSum, lowerSum - _lb[i] ); } -void Layer::computeParameterisedSymbolicBounds( double coeff, bool receive ) +void Layer::computeParameterisedSymbolicBounds( std::vector coeffs, + bool receivePolygonal, + bool receive ) { switch ( _type ) { case RELU: - computeParameterisedSymbolicBoundsForRelu( coeff ); + computeParameterisedSymbolicBoundsForRelu( coeffs, receive ); break; case SIGN: - computeParameterisedSymbolicBoundsForSign( coeff ); + computeParameterisedSymbolicBoundsForSign( coeffs, receive ); break; case LEAKY_RELU: - computeParameterisedSymbolicBoundsForLeakyRelu( coeff ); + computeParameterisedSymbolicBoundsForLeakyRelu( coeffs, receive ); break; case BILINEAR: - computeParameterisedSymbolicBoundsForBilinear( coeff ); + computeParameterisedSymbolicBoundsForBilinear( coeffs, receive ); break; default: @@ -3432,7 +3434,7 @@ void Layer::computeParameterisedSymbolicBounds( double coeff, bool receive ) break; } - if ( receive ) + if ( receivePolygonal ) { for ( unsigned i = 0; i < _size; ++i ) { @@ -3454,9 +3456,13 @@ void Layer::computeParameterisedSymbolicBounds( double coeff, bool receive ) } } -void Layer::computeParameterisedSymbolicBoundsForRelu( double coeff ) +void Layer::computeParameterisedSymbolicBoundsForRelu( std::vector coeffs, bool receive ) { + ASSERT( coeffs.size() == 1 ); + + double coeff = coeffs[0]; ASSERT( coeff >= 0 && coeff <= 1 ); + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); @@ -3613,22 +3619,29 @@ void Layer::computeParameterisedSymbolicBoundsForRelu( double coeff ) if ( _lb[i] < 
_symbolicLbOfLb[i] ) { _lb[i] = _symbolicLbOfLb[i]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + + if ( receive ) + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); } if ( _ub[i] > _symbolicUbOfUb[i] ) { _ub[i] = _symbolicUbOfUb[i]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + + if ( receive ) + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); } } } -void Layer::computeParameterisedSymbolicBoundsForSign( double coeff ) +void Layer::computeParameterisedSymbolicBoundsForSign( std::vector coeffs, bool receive ) { - ASSERT( coeff >= 0 && coeff <= 1 ); + ASSERT( coeffs.size() == 2 ); + ASSERT( coeffs[0] >= 0 && coeffs[0] <= 1 ); + ASSERT( coeffs[1] >= 0 && coeffs[1] <= 1 ); + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); @@ -3726,9 +3739,9 @@ void Layer::computeParameterisedSymbolicBoundsForSign( double coeff ) else { // The upper bound's phase is not fixed, use parameterised - // parallelogram approximation: y >= 2 / l * ( 1 - coeff ) x + 1 + // parallelogram approximation: y <= - 2 / l * coeffs[0] * x + 1 // (varies continuously between y <= 1 and y <= -2 / l * x + 1). - double factor = -2.0 / _symbolicLbOfLb[i] * ( 1 - coeff ); + double factor = -2.0 / _symbolicLbOfLb[i] * coeffs[0]; for ( unsigned j = 0; j < _inputLayerSize; ++j ) _symbolicUb[j * _size + i] *= factor; @@ -3754,9 +3767,9 @@ void Layer::computeParameterisedSymbolicBoundsForSign( double coeff ) else { // The lower bound's phase is not fixed, use parameterised - // parallelogram approximation: y >= 2 / u * ( 1 - coeff ) x - 1 + // parallelogram approximation: y >= 2 / u * coeffs[1] * x - 1 // (varies continuously between y >= -1 and y >= 2 / u * x - 1). 
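+                // For example, with u = 4: coeffs[1] = 1 gives the tightest edge
+                // y >= 0.5 * x - 1, coeffs[1] = 0.5 gives y >= 0.25 * x - 1, and
+                // coeffs[1] = 0 degenerates to the constant bound y >= -1.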
- double factor = 2.0 / _symbolicUbOfUb[i] * ( 1 - coeff ); + double factor = 2.0 / _symbolicUbOfUb[i] * coeffs[1]; for ( unsigned j = 0; j < _inputLayerSize; ++j ) { @@ -3823,22 +3836,29 @@ void Layer::computeParameterisedSymbolicBoundsForSign( double coeff ) if ( _lb[i] < _symbolicLbOfLb[i] ) { _lb[i] = _symbolicLbOfLb[i]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + + if ( receive ) + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); } if ( _ub[i] > _symbolicUbOfUb[i] ) { _ub[i] = _symbolicUbOfUb[i]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + + if ( receive ) + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); } } } -void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( double coeff ) +void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( std::vector coeffs, + bool receive ) { ASSERT( _alpha > 0 && _alpha < 1 ); + ASSERT( coeffs.size() == 1 ); + double coeff = coeffs[0]; ASSERT( coeff >= 0 && coeff <= 1 ); std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); @@ -4060,21 +4080,30 @@ void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( double coeff ) if ( _lb[i] < _symbolicLbOfLb[i] ) { _lb[i] = _symbolicLbOfLb[i]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + + if ( receive ) + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); } if ( _ub[i] > _symbolicUbOfUb[i] ) { _ub[i] = _symbolicUbOfUb[i]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + + if ( receive ) + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); } } } -void Layer::computeParameterisedSymbolicBoundsForBilinear( double coeff ) +void Layer::computeParameterisedSymbolicBoundsForBilinear( std::vector coeffs, + bool receive ) { + ASSERT( coeffs.size() == 2 ); + ASSERT( coeffs[0] >= 0 && coeffs[0] <= 1 ); + ASSERT( coeffs[1] >= 0 && coeffs[1] <= 1 ); + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); @@ -4175,11 +4204,10 @@ void Layer::computeParameterisedSymbolicBoundsForBilinear( double coeff ) // b_u = alpha2 * l_x + ( 1 - alpha2 ) * u_x // c_u = -alpha2 * -l_x * u_y - ( 1 - alpha2 ) * u_x * l_y - // Here assume alpha1 = alpha2 = coeff (and b_l = b_u). 
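+        // coeffs[0] (alpha1) selects a convex combination of the two McCormick
+        // lower planes, xy >= l_y*x + l_x*y - l_x*l_y and xy >= u_y*x + u_x*y - u_x*u_y,
+        // while coeffs[1] (alpha2) does the same for the two upper planes, so the
+        // lower and upper envelopes can now be tuned independently.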
- - double a_l = coeff * sourceLbs[1] + ( 1 - coeff ) * sourceUbs[1]; - double a_u = coeff * sourceUbs[1] + ( 1 - coeff ) * sourceLbs[1]; - double b = coeff * sourceLbs[0] + ( 1 - coeff ) * sourceUbs[0]; + double a_l = coeffs[0] * sourceLbs[1] + ( 1 - coeffs[0] ) * sourceUbs[1]; + double a_u = coeffs[1] * sourceUbs[1] + ( 1 - coeffs[1] ) * sourceLbs[1]; + double b_l = coeffs[0] * sourceLbs[0] + ( 1 - coeffs[0] ) * sourceUbs[0]; + double b_u = coeffs[1] * sourceLbs[0] + ( 1 - coeffs[1] ) * sourceUbs[0]; for ( unsigned j = 0; j < _inputLayerSize; ++j ) { @@ -4198,25 +4226,32 @@ void Layer::computeParameterisedSymbolicBoundsForBilinear( double coeff ) } else { - _symbolicLb[j * _size + i] += a_u * sourceSymbolicLb[j * sourceLayerSize + indexA]; + _symbolicUb[j * _size + i] += a_u * sourceSymbolicLb[j * sourceLayerSize + indexA]; + } + + if ( b_l >= 0 ) + { + _symbolicLb[j * _size + i] += b_l * sourceSymbolicLb[j * sourceLayerSize + indexB]; + } + else + { + _symbolicLb[j * _size + i] += b_l * sourceSymbolicUb[j * sourceLayerSize + indexB]; } - if ( b >= 0 ) + if ( b_l >= 0 ) { - _symbolicLb[j * _size + i] += b * sourceSymbolicLb[j * sourceLayerSize + indexB]; - _symbolicUb[j * _size + i] += b * sourceSymbolicUb[j * sourceLayerSize + indexB]; + _symbolicUb[j * _size + i] += b_u * sourceSymbolicUb[j * sourceLayerSize + indexB]; } else { - _symbolicLb[j * _size + i] += b * sourceSymbolicUb[j * sourceLayerSize + indexB]; - _symbolicUb[j * _size + i] += b * sourceSymbolicLb[j * sourceLayerSize + indexB]; + _symbolicUb[j * _size + i] += b_u * sourceSymbolicLb[j * sourceLayerSize + indexB]; } } - _symbolicLowerBias[i] = - -coeff * sourceLbs[0] * sourceLbs[1] - ( 1 - coeff ) * sourceUbs[0] * sourceUbs[1]; - _symbolicUpperBias[i] = - -coeff * sourceLbs[0] * sourceUbs[1] - ( 1 - coeff ) * sourceUbs[0] * sourceLbs[1]; + _symbolicLowerBias[i] = -coeffs[0] * sourceLbs[0] * sourceLbs[1] - + ( 1 - coeffs[0] ) * sourceUbs[0] * sourceUbs[1]; + _symbolicUpperBias[i] = -coeffs[1] * sourceLbs[0] * sourceUbs[1] - + ( 1 - coeffs[1] ) * sourceUbs[0] * sourceLbs[1]; double lb = FloatUtils::infinity(); double ub = FloatUtils::negativeInfinity(); @@ -4283,23 +4318,23 @@ void Layer::computeParameterisedSymbolicBoundsForBilinear( double coeff ) if ( _lb[i] < _symbolicLbOfLb[i] ) { _lb[i] = _symbolicLbOfLb[i]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + + if ( receive ) + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); } if ( _ub[i] > _symbolicUbOfUb[i] ) { _ub[i] = _symbolicUbOfUb[i]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + + if ( receive ) + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); } } } -void Layer::computeParameterisedSymbolicBoundsForSigmoid( double coeff ) -{ -} - double Layer::softmaxLSELowerBound( const Vector &inputs, const Vector &inputLbs, const Vector &inputUbs, diff --git a/src/nlr/Layer.h b/src/nlr/Layer.h index c73cdb3e18..fef75a1f8a 100644 --- a/src/nlr/Layer.h +++ b/src/nlr/Layer.h @@ -137,7 +137,9 @@ class Layer void obtainCurrentBounds( const Query &inputQuery ); void obtainCurrentBounds(); void computeSymbolicBounds(); - void computeParameterisedSymbolicBounds( double coeff, bool receive = false ); + void computeParameterisedSymbolicBounds( std::vector coeffs, + bool receivePolygonal = false, + bool receive = false ); double calculateDifferenceFromSymbolic( Map &point, unsigned 
i ) const; void computeIntervalArithmeticBounds(); @@ -298,11 +300,10 @@ class Layer /* Helper functions for parameterised symbolic bound tightening */ - void computeParameterisedSymbolicBoundsForRelu( double coeff ); - void computeParameterisedSymbolicBoundsForSign( double coeff ); - void computeParameterisedSymbolicBoundsForLeakyRelu( double coeff ); - void computeParameterisedSymbolicBoundsForBilinear( double coeff ); - void computeParameterisedSymbolicBoundsForSigmoid( double coeff ); + void computeParameterisedSymbolicBoundsForRelu( std::vector coeffs, bool receive ); + void computeParameterisedSymbolicBoundsForSign( std::vector coeffs, bool receive ); + void computeParameterisedSymbolicBoundsForLeakyRelu( std::vector coeffs, bool receive ); + void computeParameterisedSymbolicBoundsForBilinear( std::vector coeffs, bool receive ); /* Helper functions for interval bound tightening diff --git a/src/nlr/NetworkLevelReasoner.cpp b/src/nlr/NetworkLevelReasoner.cpp index 2930770c6d..8b82ae140b 100644 --- a/src/nlr/NetworkLevelReasoner.cpp +++ b/src/nlr/NetworkLevelReasoner.cpp @@ -221,10 +221,17 @@ void NetworkLevelReasoner::symbolicBoundPropagation() _layerIndexToLayer[i]->computeSymbolicBounds(); } -void NetworkLevelReasoner::parameterisedSymbolicBoundPropagation( Map coeffs ) +void NetworkLevelReasoner::parameterisedSymbolicBoundPropagation( std::vector coeffs ) { - for ( unsigned i = 0; i < _layerIndexToLayer.size(); ++i ) - _layerIndexToLayer[i]->computeParameterisedSymbolicBounds( coeffs[i] ); + Map> layerIndicesToParameters = + LPFormulator::getParametersForLayers( _layerIndexToLayer, coeffs ); + for ( auto pair : _layerIndexToLayer ) + { + unsigned layerIndex = pair.first; + Layer *layer = _layerIndexToLayer[layerIndex]; + std::vector currentLayerCoeffs = layerIndicesToParameters[layerIndex]; + layer->computeParameterisedSymbolicBounds( currentLayerCoeffs ); + } } void NetworkLevelReasoner::deepPolyPropagation() diff --git a/src/nlr/NetworkLevelReasoner.h b/src/nlr/NetworkLevelReasoner.h index f6bf6cf72f..45e2f59997 100644 --- a/src/nlr/NetworkLevelReasoner.h +++ b/src/nlr/NetworkLevelReasoner.h @@ -137,7 +137,7 @@ class NetworkLevelReasoner : public LayerOwner void obtainCurrentBounds(); void intervalArithmeticBoundPropagation(); void symbolicBoundPropagation(); - void parameterisedSymbolicBoundPropagation( Map coeffs ); + void parameterisedSymbolicBoundPropagation( std::vector coeffs ); void deepPolyPropagation(); void lpRelaxationPropagation(); void LPTighteningForOneLayer( unsigned targetIndex ); From 7d4a5a4cbd9e63dc77ebd6672b236f234894f47d Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 16 Jan 2025 20:33:12 +0200 Subject: [PATCH 45/81] Add files via upload --- src/nlr/tests/Test_NetworkLevelReasoner.h | 605 +++++++++------------- 1 file changed, 242 insertions(+), 363 deletions(-) diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h index 91bd047687..b23deca8cf 100644 --- a/src/nlr/tests/Test_NetworkLevelReasoner.h +++ b/src/nlr/tests/Test_NetworkLevelReasoner.h @@ -12203,7 +12203,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ), } ); - List bounds; + List bounds, newBounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); @@ -12214,37 +12214,12 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( 
nlr.lpRelaxationPropagation() ); List expectedBounds2( { - // From parameterised SBT: - Tightening( 2, -1, Tightening::LB ), - Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), - Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), - Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.5, Tightening::LB ), - Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), - Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, 0.38, Tightening::UB ), - - // From parameterised LP: - Tightening( 8, 0, Tightening::LB ), - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 1.625, Tightening::LB ), } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); @@ -12308,34 +12283,11 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds4( { - // From parameterised SBT: - Tightening( 2, -5, Tightening::LB ), - Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), - Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, 2, Tightening::UB ), - Tightening( 5, 3, Tightening::UB ), - - Tightening( 7, 4, Tightening::UB ), - Tightening( 8, 2, Tightening::UB ), - - Tightening( 9, 4, Tightening::UB ), - Tightening( 10, 0.38, Tightening::UB ), - - // From parameterised LP: - Tightening( 8, 0, Tightening::LB ), - Tightening( 9, 0, Tightening::LB ), - - Tightening( 11, -0.93, Tightening::LB ), - Tightening( 10, 2, Tightening::UB ), + Tightening( 11, 0.8472, Tightening::LB ), } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } @@ -12390,7 +12342,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ), } ); - List bounds; + List bounds, newBounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); @@ -12401,27 +12353,19 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds2( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 12, -1.75, Tightening::LB ), + Tightening( 13, -4.25, Tightening::LB ), + Tightening( 13, 3.25, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 17, -11.1417, Tightening::LB 
), + Tightening( 17, 10, Tightening::UB ), - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), - Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + Tightening( 21, -17.3084, Tightening::LB ), + Tightening( 21, 3.2160, Tightening::UB ), } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); @@ -12434,8 +12378,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 2, 2 ); double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); tableau.setUpperBound( 3, large ); tableau.setLowerBound( 4, -large ); @@ -12518,27 +12460,21 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds4( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 11, -5, Tightening::LB ), + Tightening( 12, -4.6429, Tightening::LB ), + Tightening( 13, 8.5519, Tightening::UB ), - Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 17, -23.6231, Tightening::LB ), + Tightening( 17, 14.0909, Tightening::UB ), + Tightening( 18, 2, Tightening::LB ), + Tightening( 18, 28.2015, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), - Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + Tightening( 21, -29.2015, Tightening::LB ), + Tightening( 21, 6.5734, Tightening::UB ), } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } @@ -12580,7 +12516,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), } ); - List bounds; + List bounds, newBounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); @@ -12591,27 +12527,17 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds2( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + 
Tightening( 7, -2, Tightening::LB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1.8, Tightening::LB ), + Tightening( 10, 0, Tightening::UB ), - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), - Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + Tightening( 11, 1.4542, Tightening::LB ), + Tightening( 11, 1.4542, Tightening::LB ), } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); @@ -12675,27 +12601,22 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds4( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 6, -2, Tightening::LB ), + Tightening( 6, 3.1, Tightening::UB ), + Tightening( 7, -3.2, Tightening::LB ), + Tightening( 7, 4, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 8, 3.1, Tightening::UB ), + Tightening( 9, -0.32, Tightening::LB ), - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), - Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + Tightening( 10, -3.8726, Tightening::LB ), + Tightening( 10, 0.03, Tightening::UB ), + Tightening( 11, 0.4074, Tightening::LB ), + Tightening( 11, 11.3243, Tightening::UB ), } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } @@ -12723,23 +12644,34 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, 
Tightening::UB ), - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ), + Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ), + Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ), - Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ), + Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ), + Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ), - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ), + Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ), + + Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ), + Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ), + + Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ), } ); - List bounds; + List bounds, newBounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); @@ -12750,27 +12682,32 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds2( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), - Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + Tightening( 11, -4.5, Tightening::LB ), + Tightening( 11, 8.5, Tightening::UB ), + Tightening( 12, -2.225, Tightening::LB ), + Tightening( 13, 2.975, Tightening::UB ), + Tightening( 13, -4.175, Tightening::LB ), + + Tightening( 15, -0.2225, Tightening::LB ), + Tightening( 16, 2.975, Tightening::UB ), + Tightening( 16, -0.4175, Tightening::LB ), + + Tightening( 17, -11.452, Tightening::LB ), + Tightening( 17, 10.18, Tightening::UB ), + Tightening( 18, 0.87, Tightening::LB ), + Tightening( 18, 16.0688, Tightening::UB ), + + Tightening( 19, -1.1452, Tightening::LB ), + Tightening( 19, 10.18, Tightening::UB ), + Tightening( 20, 0.87, Tightening::LB ), + Tightening( 20, 16.0688, Tightening::UB ), + + Tightening( 21, -17.0684, Tightening::LB ), + Tightening( 21, 3.6767, Tightening::UB ), } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( 
bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); @@ -12783,8 +12720,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 2, 2 ); double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); tableau.setUpperBound( 3, large ); tableau.setLowerBound( 4, -large ); @@ -12803,6 +12738,26 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 10, large ); tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); // Invoke DeepPoly @@ -12810,20 +12765,31 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), - Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ), + Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ), + Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ), - Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), - Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ), + Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ), + Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ), - Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), - Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), + Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 
33.8084, Tightening::UB ), + Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ), + + Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ), + Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ), + + Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ), } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); @@ -12836,27 +12802,32 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds4( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), - Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + Tightening( 11, -5.6, Tightening::LB ), + Tightening( 11, 16.4636, Tightening::UB ), + Tightening( 12, -6.0286, Tightening::LB ), + Tightening( 13, -5.9, Tightening::LB ), + Tightening( 13, 8.0468, Tightening::UB ), + + Tightening( 15, -0.6029, Tightening::LB ), + Tightening( 16, -0.59, Tightening::LB ), + Tightening( 16, 8.0468, Tightening::UB ), + + Tightening( 17, -24.8864, Tightening::LB ), + Tightening( 17, 14.3076, Tightening::UB ), + Tightening( 18, 0.75, Tightening::LB ), + Tightening( 18, 28.0272, Tightening::UB ), + + Tightening( 19, -2.4886, Tightening::LB ), + Tightening( 19, 14.3076, Tightening::UB ), + Tightening( 20, 0.75, Tightening::LB ), + Tightening( 20, 28.0272, Tightening::UB ), + + Tightening( 21, -29.9648, Tightening::LB ), + Tightening( 21, 6.9619, Tightening::UB ), } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } @@ -12898,7 +12869,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), } ); - List bounds; + List bounds, newBounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); @@ -12908,28 +12879,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - List expectedBounds2( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 0, 
Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), - Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), - } ); + List expectedBounds2( {} ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); @@ -12992,28 +12945,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - List expectedBounds4( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), - Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), - } ); + List expectedBounds4( {} ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } @@ -13068,7 +13003,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), } ); - List bounds; + List bounds, newBounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); @@ -13078,28 +13013,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - List expectedBounds2( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), - Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), - } ); + List expectedBounds2( {} ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) 
); + bounds = removeRedundancies( bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); @@ -13112,8 +13029,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 2, 2 ); double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); tableau.setUpperBound( 3, large ); tableau.setLowerBound( 4, -large ); @@ -13195,28 +13110,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - List expectedBounds4( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), - Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), - } ); + List expectedBounds4( {} ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } @@ -13258,7 +13155,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ), } ); - List bounds; + List bounds, newBounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); @@ -13269,27 +13166,11 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), - Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), - - Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), - - Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), + Tightening( 11, -2.4149, Tightening::LB ), } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); @@ -13327,20 +13208,20 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); List 
expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), - Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), + Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), - Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), - Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), - Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), - Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), + Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); @@ -13353,27 +13234,17 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds4( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 5.8235, Tightening::UB ), - Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 10, -14, Tightening::LB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), - Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + Tightening( 11, -24.8805, Tightening::LB ), + Tightening( 11, 14, Tightening::UB ), } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } @@ -13420,7 +13291,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ), } ); - List bounds; + List bounds, newBounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); @@ -13431,27 +13302,13 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 
TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds2( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 12, -1.75, Tightening::LB ), - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), - Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); @@ -13464,8 +13321,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 2, 2 ); double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); tableau.setUpperBound( 3, large ); tableau.setLowerBound( 4, -large ); @@ -13528,33 +13383,20 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds4( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 11, -5, Tightening::LB ), + Tightening( 12, -4.6429, Tightening::LB ), - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), - Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } bool boundsEqual( const List &bounds, const List &expectedBounds ) { - if ( bounds.size() != expectedBounds.size() ) + if ( bounds.size() < expectedBounds.size() ) return false; bool allFound = true; @@ -13573,6 +13415,43 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite return allFound; } + + // Create list of all tightenings in newBounds for which there is no bound in newBounds or in + // bounds which is at least as tight. 
+    List<Tightening> removeRedundancies( const List<Tightening> &bounds,
+                                         const List<Tightening> &newBounds )
+    {
+        List<Tightening> minimalBounds;
+
+        for ( const auto &newBound : newBounds )
+        {
+            bool foundTighter = false;
+            for ( const auto &bound : bounds )
+            {
+                foundTighter |=
+                    ( newBound._type == bound._type && newBound._variable == bound._variable &&
+                      ( ( newBound._type == Tightening::LB &&
+                          FloatUtils::lte( newBound._value, bound._value, 0.0001 ) ) ||
+                        ( newBound._type == Tightening::UB &&
+                          FloatUtils::gte( newBound._value, bound._value, 0.0001 ) ) ) );
+            }
+
+            for ( const auto &bound : newBounds )
+            {
+                foundTighter |=
+                    ( newBound._type == bound._type && newBound._variable == bound._variable &&
+                      ( ( newBound._type == Tightening::LB &&
+                          FloatUtils::lt( newBound._value, bound._value, 0.0001 ) ) ||
+                        ( newBound._type == Tightening::UB &&
+                          FloatUtils::gt( newBound._value, bound._value, 0.0001 ) ) ) );
+            }
+
+            if ( !foundTighter )
+                minimalBounds.append( newBound );
+        }
+        return minimalBounds;
+    }
+
     void updateTableau( MockTableau &tableau, List<Tightening> &tightenings )
     {
         ASSERT( tableau );

From 03a74a7396dd18dafcbaebd9347b59a6b6e3601f Mon Sep 17 00:00:00 2001
From: ido-shm-uel
Date: Mon, 3 Feb 2025 13:24:27 +0200
Subject: [PATCH 46/81] Delete src/nlr/AdamOptimizer.h

---
 src/nlr/AdamOptimizer.h | 399 ----------------------------------------
 1 file changed, 399 deletions(-)
 delete mode 100644 src/nlr/AdamOptimizer.h

diff --git a/src/nlr/AdamOptimizer.h b/src/nlr/AdamOptimizer.h
deleted file mode 100644
index 018cd34325..0000000000
--- a/src/nlr/AdamOptimizer.h
+++ /dev/null
@@ -1,399 +0,0 @@
-/*
- * Copyright Nick Thompson, 2024
- * Use, modification and distribution are subject to the
- * Boost Software License, Version 1.0. (See accompanying file
- * LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
- */
-#ifndef ADAM_OPTIMIZER
-#define ADAM_OPTIMIZER
-
-#include "FloatUtils.h"
-#include "GlobalConfiguration.h"
-
-#include <algorithm> // for std::sort
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include <type_traits> // for std::false_type
-#include
-#include
-
-namespace NLR {
-
-template <typename T, typename = void> struct has_resize : std::false_type
-{
-};
-
-template <typename T>
-struct has_resize<T, std::void_t<decltype( std::declval<T>().resize( size_t{} ) )>> : std::true_type
-{
-};
-
-template <typename T> constexpr bool has_resize_v = has_resize<T>::value;
-
-template <typename ArgumentContainer>
-void validate_bounds( ArgumentContainer const &lower_bounds, ArgumentContainer const &upper_bounds )
-{
-    std::ostringstream oss;
-
-    if ( lower_bounds.size() == 0 )
-    {
-        oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
-        oss << ": The dimension of the problem cannot be zero.";
-        throw std::domain_error( oss.str() );
-    }
-
-    if ( upper_bounds.size() != lower_bounds.size() )
-    {
-        oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
-        oss << ": There must be the same number of lower bounds as upper bounds, but given ";
-        oss << upper_bounds.size() << " upper bounds, and " << lower_bounds.size()
-            << " lower bounds.";
-        throw std::domain_error( oss.str() );
-    }
-
-    for ( size_t i = 0; i < lower_bounds.size(); ++i )
-    {
-        auto lb = lower_bounds[i];
-        auto ub = upper_bounds[i];
-        if ( lb > ub )
-        {
-            oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
-            oss << ": The upper bound must be greater than or equal to the lower bound, but the "
-                   "upper bound is "
-                << ub << " and the lower is " << lb << ".";
-            throw std::domain_error( oss.str() );
-        }
-
-        if ( !FloatUtils::isFinite( lb ) )
-        {
-            oss << __FILE__ << ":" << __LINE__ << ":" << __func__;
-            oss << ": The lower bound must be finite, but got " << lb << ".";
-            oss << " For 
infinite bounds, emulate with std::numeric_limits::lower() or use a " - "standard infinite->finite " - "transform."; - throw std::domain_error( oss.str() ); - } - - if ( !FloatUtils::isFinite( ub ) ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The upper bound must be finite, but got " << ub << "."; - oss << " For infinite bounds, emulate with std::numeric_limits::max() or use a " - "standard infinite->finite " - "transform."; - throw std::domain_error( oss.str() ); - } - } -} - -template -std::vector random_initial_population( ArgumentContainer const &lower_bounds, - ArgumentContainer const &upper_bounds, - size_t initial_population_size, - URBG &&gen ) -{ - using Real = typename ArgumentContainer::value_type; - using DimensionlessReal = decltype( Real() / Real() ); - constexpr bool has_resize = has_resize_v; - std::vector population( initial_population_size ); - auto const dimension = lower_bounds.size(); - - for ( size_t i = 0; i < population.size(); ++i ) - { - if constexpr ( has_resize ) - population[i].resize( dimension ); - else - { - // Argument type must be known at compile-time; like std::array: - if ( population[i].size() != dimension ) - { - std::ostringstream oss; - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": For containers which do not have resize, the default size must be the " - "same as the dimension, "; - oss << "but the default container size is " << population[i].size() - << " and the dimension of the problem is " << dimension << "."; - oss << " The function argument container type is " - << typeid( ArgumentContainer ).name() << ".\n"; - throw std::runtime_error( oss.str() ); - } - } - } - - // Why don't we provide an option to initialize with (say) a Gaussian distribution? - // > If the optimum's location is fairly well known, - // > a Gaussian distribution may prove somewhat faster, although it - // > may also increase the probability that the population will converge prematurely. - // > In general, uniform distributions are preferred, since they best reflect - // > the lack of knowledge about the optimum's location. - // - Differential Evolution: A Practical Approach to Global Optimization - // That said, scipy uses Latin Hypercube sampling and says self-avoiding sequences are - // preferable. So this is something that could be investigated and potentially improved. 
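-    // A minimal sketch of such a Latin Hypercube initializer (illustrative only, not part of
-    // this file): split each dimension into population.size() equal strata, draw one point per
-    // stratum, and shuffle the stratum order independently per dimension so the samples do not
-    // line up along the diagonal:
-    //
-    //     std::vector<size_t> strata( population.size() );
-    //     std::iota( strata.begin(), strata.end(), size_t{ 0 } ); // needs <numeric>
-    //     std::uniform_real_distribution<DimensionlessReal> unif01( DimensionlessReal( 0 ),
-    //                                                               DimensionlessReal( 1 ) );
-    //     for ( size_t j = 0; j < dimension; ++j )
-    //     {
-    //         std::shuffle( strata.begin(), strata.end(), gen );
-    //         for ( size_t i = 0; i < population.size(); ++i )
-    //         {
-    //             // One point per stratum: t lies in [strata[i]/n, (strata[i]+1)/n).
-    //             auto t = ( strata[i] + unif01( gen ) ) / population.size();
-    //             population[i][j] = lower_bounds[j] + t * ( upper_bounds[j] - lower_bounds[j] );
-    //         }
-    //     }
-    //
-    // The plain uniform sampling actually used here follows.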
- std::uniform_real_distribution dis( DimensionlessReal( 0 ), - DimensionlessReal( 1 ) ); - for ( size_t i = 0; i < population.size(); ++i ) - { - for ( size_t j = 0; j < dimension; ++j ) - { - auto const &lb = lower_bounds[j]; - auto const &ub = upper_bounds[j]; - population[i][j] = lb + dis( gen ) * ( ub - lb ); - } - } - - return population; -} - -template -void validate_initial_guess( ArgumentContainer const &initial_guess, - ArgumentContainer const &lower_bounds, - ArgumentContainer const &upper_bounds ) -{ - std::ostringstream oss; - auto const dimension = lower_bounds.size(); - - if ( initial_guess.size() != dimension ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The initial guess must have the same dimensions as the problem,"; - oss << ", but the problem size is " << dimension << " and the initial guess has " - << initial_guess.size() << " elements."; - throw std::domain_error( oss.str() ); - } - - for ( size_t i = 0; i < dimension; ++i ) - { - auto lb = lower_bounds[i]; - auto ub = upper_bounds[i]; - - if ( !FloatUtils::isFinite( initial_guess[i] ) ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": At index " << i << ", the initial guess is " << initial_guess[i] - << ", make sure all elements of the initial guess are finite."; - throw std::domain_error( oss.str() ); - } - - if ( initial_guess[i] < lb || initial_guess[i] > ub ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": At index " << i << " the initial guess " << initial_guess[i] - << " is not in the bounds [" << lb << ", " << ub << "]."; - throw std::domain_error( oss.str() ); - } - } -} - -// Single-Threaded Adam Optimizer - -// We provide the parameters in a struct-there are too many of them and they are too unwieldy to -// pass individually: -template struct adam_optimizer_parameters -{ - using Real = typename ArgumentContainer::value_type; - ArgumentContainer lower_bounds; - ArgumentContainer upper_bounds; - - Real step_size = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; - Real lr = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE; - Real epsilon = GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS; - Real beta1 = 0.9; - Real beta2 = 0.999; - Real weight_decay = 0; - bool maximize = false; - size_t max_iterations = 100; - ArgumentContainer const *initial_guess = nullptr; -}; - -template -void validate_adam_optimizer_parameters( - adam_optimizer_parameters const &cd_params ) -{ - std::ostringstream oss; - validate_bounds( cd_params.lower_bounds, cd_params.upper_bounds ); - - if ( FloatUtils::isNan( cd_params.step_size ) || cd_params.step_size <= 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": step_size > 0 is required, but got step_size=" << cd_params.step_size << "."; - throw std::domain_error( oss.str() ); - } - - if ( FloatUtils::isNan( cd_params.epsilon ) || cd_params.epsilon <= 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": epsilon > 0 is required, but got epsilon=" << cd_params.epsilon << "."; - throw std::domain_error( oss.str() ); - } - - if ( FloatUtils::isNan( cd_params.beta1 ) || cd_params.beta1 <= 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": beta1 > 0 is required, but got beta1=" << cd_params.beta1 << "."; - throw std::domain_error( oss.str() ); - } - - if ( FloatUtils::isNan( cd_params.beta2 ) || cd_params.beta2 <= 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": beta2 > 
0 is required, but got beta2=" << cd_params.beta2 << "."; - throw std::domain_error( oss.str() ); - } - - if ( FloatUtils::isNan( cd_params.lr ) || cd_params.lr <= 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": lr > 0 is required, but got lr=" << cd_params.lr << "."; - throw std::domain_error( oss.str() ); - } - - if ( FloatUtils::isNan( cd_params.weight_decay ) || cd_params.weight_decay < 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": weight_decay >= 0 is required, but got weight_decay=" << cd_params.weight_decay - << "."; - throw std::domain_error( oss.str() ); - } - - if ( cd_params.max_iterations < 1 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": There must be at least one generation."; - throw std::invalid_argument( oss.str() ); - } - - if ( cd_params.initial_guess ) - { - validate_initial_guess( - *cd_params.initial_guess, cd_params.lower_bounds, cd_params.upper_bounds ); - } -} - -template -ArgumentContainer adam_optimizer( - const Func cost_function, - adam_optimizer_parameters const &cd_params, - URBG &gen, - std::invoke_result_t target_value, - bool *cancellation = nullptr, - std::vector>> - *queries = nullptr, - std::invoke_result_t *current_minimum_cost = nullptr ) -{ - using ResultType = std::invoke_result_t; - - validate_adam_optimizer_parameters( cd_params ); - const size_t dimension = cd_params.lower_bounds.size(); - auto step_size = cd_params.step_size; - auto max_iterations = cd_params.max_iterations; - auto maximize = cd_params.maximize; - auto epsilon = cd_params.epsilon; - auto beta1 = cd_params.beta1; - auto beta2 = cd_params.beta2; - auto lower_bounds = cd_params.lower_bounds; - auto upper_bounds = cd_params.upper_bounds; - auto lr = cd_params.lr; - auto weight_decay = cd_params.weight_decay; - auto guess = - random_initial_population( cd_params.lower_bounds, cd_params.upper_bounds, 1, gen ); - - if ( cd_params.initial_guess ) - { - guess[0] = *cd_params.initial_guess; - } - - std::vector candidates( dimension ); - std::vector gradient( dimension ); - std::vector first_moment( dimension ); - std::vector second_moment( dimension ); - ArgumentContainer current_minimum = guess[0]; - - for ( size_t j = 0; j < dimension; ++j ) - { - first_moment[j] = 0; - second_moment[j] = 0; - } - - for ( size_t i = 0; i < max_iterations; ++i ) - { - double current_cost = cost_function( guess[0] ); - std::cout << " current_cost = " << current_cost << std::endl; - for ( size_t j = 0; j < dimension; ++j ) - { - candidates[j] = guess[0]; - candidates[j][j] += step_size; - - if ( candidates[j][j] > upper_bounds[j] || candidates[j][j] < lower_bounds[j] ) - { - gradient[j] = 0; - continue; - } - - size_t sign = ( maximize == false ? 
1 : -1 ); - double cost = cost_function( candidates[j] ); - - for ( size_t k = 0; k < dimension; ++k ) - { - std::cout << "candidates[" << j << "][" << k << "] = " << candidates[j][k] - << std::endl; - std::cout << "guess[0][" << k << "] = " << guess[0][k] << std::endl; - } - - std::cout << " cost = " << cost << std::endl; - gradient[j] = sign * ( cost - current_cost ) / step_size + weight_decay * guess[0][j]; - - if ( !FloatUtils::isNan( target_value ) && cost <= target_value ) - { - guess[0] = candidates[j]; - break; - } - } - - bool gradient_is_zero = true; - for ( size_t j = 0; j < dimension; ++j ) - { - std::cout << "gradient[" << j << "] = " << gradient[j] << std::endl; - if ( FloatUtils::abs( gradient[j] ) > epsilon ) - { - gradient_is_zero = false; - } - } - - if ( gradient_is_zero ) - { - break; - } - - for ( size_t j = 0; j < dimension; ++j ) - { - first_moment[j] = beta1 * first_moment[j] + ( 1 - beta1 ) * gradient[j]; - first_moment[j] /= ( 1 - std::pow( beta1, step_size ) ); - - second_moment[j] = beta2 * first_moment[j] + ( 1 - beta2 ) * std::pow( gradient[j], 2 ); - second_moment[j] /= ( 1 - std::pow( beta2, step_size ) ); - - guess[0][j] -= lr * first_moment[j] / ( std::sqrt( second_moment[j] ) + epsilon ); - - - if ( guess[0][j] > upper_bounds[j] ) - { - guess[0][j] = upper_bounds[j]; - } - - if ( guess[0][j] < lower_bounds[j] ) - { - guess[0][j] = lower_bounds[j]; - } - } - } - - return guess[0]; -} - -} // namespace NLR -#endif From 6ac31b9a0a14e8fd45ff311d364e59ace9ca2734 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Mon, 3 Feb 2025 13:24:50 +0200 Subject: [PATCH 47/81] Delete src/nlr/CoordinateDescent.h --- src/nlr/CoordinateDescent.h | 308 ------------------------------------ 1 file changed, 308 deletions(-) delete mode 100644 src/nlr/CoordinateDescent.h diff --git a/src/nlr/CoordinateDescent.h b/src/nlr/CoordinateDescent.h deleted file mode 100644 index f73d909c7f..0000000000 --- a/src/nlr/CoordinateDescent.h +++ /dev/null @@ -1,308 +0,0 @@ -/* - * Copyright Nick Thompson, 2024 - * Use, modification and distribution are subject to the - * Boost Software License, Version 1.0. 
(See accompanying file - * LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) - */ -#ifndef COORDINATE_DESCENT -#define COORDINATE_DESCENT - -#include "FloatUtils.h" -#include "GlobalConfiguration.h" - -#include // for std::sort -#include -#include -#include -#include -#include -#include -#include // for std::false_type -#include -#include - -namespace NLR { - -template struct has_resize : std::false_type -{ -}; - -template -struct has_resize().resize( size_t{} ) )>> : std::true_type -{ -}; - -template constexpr bool has_resize_v = has_resize::value; - -template -void validate_bounds( ArgumentContainer const &lower_bounds, ArgumentContainer const &upper_bounds ) -{ - std::ostringstream oss; - - if ( lower_bounds.size() == 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The dimension of the problem cannot be zero."; - throw std::domain_error( oss.str() ); - } - - if ( upper_bounds.size() != lower_bounds.size() ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": There must be the same number of lower bounds as upper bounds, but given "; - oss << upper_bounds.size() << " upper bounds, and " << lower_bounds.size() - << " lower bounds."; - throw std::domain_error( oss.str() ); - } - - for ( size_t i = 0; i < lower_bounds.size(); ++i ) - { - auto lb = lower_bounds[i]; - auto ub = upper_bounds[i]; - if ( lb > ub ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The upper bound must be greater than or equal to the lower bound, but the " - "upper bound is " - << ub << " and the lower is " << lb << "."; - throw std::domain_error( oss.str() ); - } - - if ( !FloatUtils::isFinite( lb ) ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The lower bound must be finite, but got " << lb << "."; - oss << " For infinite bounds, emulate with std::numeric_limits::lower() or use a " - "standard infinite->finite " - "transform."; - throw std::domain_error( oss.str() ); - } - - if ( !FloatUtils::isFinite( ub ) ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The upper bound must be finite, but got " << ub << "."; - oss << " For infinite bounds, emulate with std::numeric_limits::max() or use a " - "standard infinite->finite " - "transform."; - throw std::domain_error( oss.str() ); - } - } -} - -template -std::vector random_initial_population( ArgumentContainer const &lower_bounds, - ArgumentContainer const &upper_bounds, - size_t initial_population_size, - URBG &&gen ) -{ - using Real = typename ArgumentContainer::value_type; - using DimensionlessReal = decltype( Real() / Real() ); - constexpr bool has_resize = has_resize_v; - std::vector population( initial_population_size ); - auto const dimension = lower_bounds.size(); - - for ( size_t i = 0; i < population.size(); ++i ) - { - if constexpr ( has_resize ) - population[i].resize( dimension ); - else - { - // Argument type must be known at compile-time; like std::array: - if ( population[i].size() != dimension ) - { - std::ostringstream oss; - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": For containers which do not have resize, the default size must be the " - "same as the dimension, "; - oss << "but the default container size is " << population[i].size() - << " and the dimension of the problem is " << dimension << "."; - oss << " The function argument container type is " - << typeid( ArgumentContainer ).name() << ".\n"; - throw std::runtime_error( oss.str() ); - } - } - } - - // Why don't we provide an 
option to initialize with (say) a Gaussian distribution? - // > If the optimum's location is fairly well known, - // > a Gaussian distribution may prove somewhat faster, although it - // > may also increase the probability that the population will converge prematurely. - // > In general, uniform distributions are preferred, since they best reflect - // > the lack of knowledge about the optimum's location. - // - Differential Evolution: A Practical Approach to Global Optimization - // That said, scipy uses Latin Hypercube sampling and says self-avoiding sequences are - // preferable. So this is something that could be investigated and potentially improved. - std::uniform_real_distribution dis( DimensionlessReal( 0 ), - DimensionlessReal( 1 ) ); - for ( size_t i = 0; i < population.size(); ++i ) - { - for ( size_t j = 0; j < dimension; ++j ) - { - auto const &lb = lower_bounds[j]; - auto const &ub = upper_bounds[j]; - population[i][j] = lb + dis( gen ) * ( ub - lb ); - } - } - - return population; -} - -template -void validate_initial_guess( ArgumentContainer const &initial_guess, - ArgumentContainer const &lower_bounds, - ArgumentContainer const &upper_bounds ) -{ - std::ostringstream oss; - auto const dimension = lower_bounds.size(); - - if ( initial_guess.size() != dimension ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The initial guess must have the same dimensions as the problem,"; - oss << ", but the problem size is " << dimension << " and the initial guess has " - << initial_guess.size() << " elements."; - throw std::domain_error( oss.str() ); - } - - for ( size_t i = 0; i < dimension; ++i ) - { - auto lb = lower_bounds[i]; - auto ub = upper_bounds[i]; - - if ( !FloatUtils::isFinite( initial_guess[i] ) ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": At index " << i << ", the initial guess is " << initial_guess[i] - << ", make sure all elements of the initial guess are finite."; - throw std::domain_error( oss.str() ); - } - - if ( initial_guess[i] < lb || initial_guess[i] > ub ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": At index " << i << " the initial guess " << initial_guess[i] - << " is not in the bounds [" << lb << ", " << ub << "]."; - throw std::domain_error( oss.str() ); - } - } -} - -// Single-Threaded Coordinate Descent - -// We provide the parameters in a struct-there are too many of them and they are too unwieldy to -// pass individually: -template struct coordinate_descent_parameters -{ - using Real = typename ArgumentContainer::value_type; - ArgumentContainer lower_bounds; - ArgumentContainer upper_bounds; - - Real step_size = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; - // size_t max_iterations = - // GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS; - size_t max_iterations = 2; - ArgumentContainer const *initial_guess = nullptr; -}; - -template -void validate_coordinate_descent_parameters( - coordinate_descent_parameters const &cd_params ) -{ - std::ostringstream oss; - validate_bounds( cd_params.lower_bounds, cd_params.upper_bounds ); - - if ( FloatUtils::isNan( cd_params.step_size ) || cd_params.step_size <= 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": step_size > 0 is required, but got step_size=" << cd_params.step_size << "."; - throw std::domain_error( oss.str() ); - } - - if ( cd_params.max_iterations < 1 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": There must be at 
least one generation."; - throw std::invalid_argument( oss.str() ); - } - - if ( cd_params.initial_guess ) - { - validate_initial_guess( - *cd_params.initial_guess, cd_params.lower_bounds, cd_params.upper_bounds ); - } -} - -template -ArgumentContainer coordinate_descent( - const Func cost_function, - coordinate_descent_parameters const &cd_params, - URBG &gen, - std::invoke_result_t target_value, - bool *cancellation = nullptr, - std::vector>> - *queries = nullptr, - std::invoke_result_t *current_minimum_cost = nullptr ) -{ - using ResultType = std::invoke_result_t; - - validate_coordinate_descent_parameters( cd_params ); - const size_t dimension = cd_params.lower_bounds.size(); - const size_t candidates_number = 2 * dimension; - auto step_size = cd_params.step_size; - auto max_iterations = cd_params.max_iterations; - auto lower_bounds = cd_params.lower_bounds; - auto upper_bounds = cd_params.upper_bounds; - auto guess = - random_initial_population( cd_params.lower_bounds, cd_params.upper_bounds, 1, gen ); - - if ( cd_params.initial_guess ) - { - guess[0] = *cd_params.initial_guess; - } - - std::vector candidates( candidates_number ); - std::vector costs( candidates_number ); - ArgumentContainer current_minimum = guess[0]; - - for ( size_t i = 0; i < max_iterations; ++i ) - { - for ( size_t j = 0; j < candidates_number; ++j ) - { - candidates[j] = guess[0]; - size_t index = j / 2; - size_t sign = ( j % 2 == 0 ? 1 : -1 ); - candidates[j][index] += sign * step_size; - - if ( candidates[j][index] > upper_bounds[j] ) - { - candidates[j][index] = upper_bounds[j]; - } - - if ( candidates[j][index] < lower_bounds[j] ) - { - candidates[j][index] = lower_bounds[j]; - } - - costs[j] = cost_function( candidates[j] ); - - if ( current_minimum_cost && costs[j] < *current_minimum_cost ) - { - *current_minimum_cost = costs[j]; - current_minimum = candidates[j]; - } - - if ( !FloatUtils::isNan( target_value ) && costs[j] <= target_value ) - { - break; - } - } - - guess[0] = current_minimum; - } - - return guess[0]; -} - -} // namespace NLR -#endif From 756ca412740153a3d3b541e0512a0ab998461d2f Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Mon, 3 Feb 2025 13:25:25 +0200 Subject: [PATCH 48/81] Delete src/nlr/DifferentialEvolution.h --- src/nlr/DifferentialEvolution.h | 549 -------------------------------- 1 file changed, 549 deletions(-) delete mode 100644 src/nlr/DifferentialEvolution.h diff --git a/src/nlr/DifferentialEvolution.h b/src/nlr/DifferentialEvolution.h deleted file mode 100644 index eb27c0ef0e..0000000000 --- a/src/nlr/DifferentialEvolution.h +++ /dev/null @@ -1,549 +0,0 @@ -/* - * Copyright Nick Thompson, 2024 - * Use, modification and distribution are subject to the - * Boost Software License, Version 1.0. 
(See accompanying file - * LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) - */ -#ifndef DIFFERENTIAL_EVOLUTION -#define DIFFERENTIAL_EVOLUTION - -#include "FloatUtils.h" - -#include // for std::sort -#include -#include -#include -#include -#include -#include -#include -#include -#include // for std::false_type -#include -#include - -namespace NLR { - -template struct has_resize : std::false_type -{ -}; - -template -struct has_resize().resize( size_t{} ) )>> : std::true_type -{ -}; - -template constexpr bool has_resize_v = has_resize::value; - -template -void validate_bounds( ArgumentContainer const &lower_bounds, ArgumentContainer const &upper_bounds ) -{ - std::ostringstream oss; - - if ( lower_bounds.size() == 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The dimension of the problem cannot be zero."; - throw std::domain_error( oss.str() ); - } - - if ( upper_bounds.size() != lower_bounds.size() ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": There must be the same number of lower bounds as upper bounds, but given "; - oss << upper_bounds.size() << " upper bounds, and " << lower_bounds.size() - << " lower bounds."; - throw std::domain_error( oss.str() ); - } - - for ( size_t i = 0; i < lower_bounds.size(); ++i ) - { - auto lb = lower_bounds[i]; - auto ub = upper_bounds[i]; - if ( lb > ub ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The upper bound must be greater than or equal to the lower bound, but the " - "upper bound is " - << ub << " and the lower is " << lb << "."; - throw std::domain_error( oss.str() ); - } - - if ( !FloatUtils::isFinite( lb ) ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The lower bound must be finite, but got " << lb << "."; - oss << " For infinite bounds, emulate with std::numeric_limits::lower() or use a " - "standard infinite->finite " - "transform."; - throw std::domain_error( oss.str() ); - } - - if ( !FloatUtils::isFinite( ub ) ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The upper bound must be finite, but got " << ub << "."; - oss << " For infinite bounds, emulate with std::numeric_limits::max() or use a " - "standard infinite->finite " - "transform."; - throw std::domain_error( oss.str() ); - } - } -} - -template -std::vector random_initial_population( ArgumentContainer const &lower_bounds, - ArgumentContainer const &upper_bounds, - size_t initial_population_size, - URBG &&gen ) -{ - using Real = typename ArgumentContainer::value_type; - using DimensionlessReal = decltype( Real() / Real() ); - constexpr bool has_resize = has_resize_v; - std::vector population( initial_population_size ); - auto const dimension = lower_bounds.size(); - - for ( size_t i = 0; i < population.size(); ++i ) - { - if constexpr ( has_resize ) - population[i].resize( dimension ); - else - { - // Argument type must be known at compile-time; like std::array: - if ( population[i].size() != dimension ) - { - std::ostringstream oss; - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": For containers which do not have resize, the default size must be the " - "same as the dimension, "; - oss << "but the default container size is " << population[i].size() - << " and the dimension of the problem is " << dimension << "."; - oss << " The function argument container type is " - << typeid( ArgumentContainer ).name() << ".\n"; - throw std::runtime_error( oss.str() ); - } - } - } - - // Why don't we provide an option 
to initialize with (say) a Gaussian distribution? - // > If the optimum's location is fairly well known, - // > a Gaussian distribution may prove somewhat faster, although it - // > may also increase the probability that the population will converge prematurely. - // > In general, uniform distributions are preferred, since they best reflect - // > the lack of knowledge about the optimum's location. - // - Differential Evolution: A Practical Approach to Global Optimization - // That said, scipy uses Latin Hypercube sampling and says self-avoiding sequences are - // preferable. So this is something that could be investigated and potentially improved. - std::uniform_real_distribution dis( DimensionlessReal( 0 ), - DimensionlessReal( 1 ) ); - for ( size_t i = 0; i < population.size(); ++i ) - { - for ( size_t j = 0; j < dimension; ++j ) - { - auto const &lb = lower_bounds[j]; - auto const &ub = upper_bounds[j]; - population[i][j] = lb + dis( gen ) * ( ub - lb ); - } - } - - return population; -} - -template -void validate_initial_guess( ArgumentContainer const &initial_guess, - ArgumentContainer const &lower_bounds, - ArgumentContainer const &upper_bounds ) -{ - std::ostringstream oss; - auto const dimension = lower_bounds.size(); - - if ( initial_guess.size() != dimension ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The initial guess must have the same dimensions as the problem,"; - oss << ", but the problem size is " << dimension << " and the initial guess has " - << initial_guess.size() << " elements."; - throw std::domain_error( oss.str() ); - } - - for ( size_t i = 0; i < dimension; ++i ) - { - auto lb = lower_bounds[i]; - auto ub = upper_bounds[i]; - - if ( !FloatUtils::isFinite( initial_guess[i] ) ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": At index " << i << ", the initial guess is " << initial_guess[i] - << ", make sure all elements of the initial guess are finite."; - throw std::domain_error( oss.str() ); - } - - if ( initial_guess[i] < lb || initial_guess[i] > ub ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": At index " << i << " the initial guess " << initial_guess[i] - << " is not in the bounds [" << lb << ", " << ub << "]."; - throw std::domain_error( oss.str() ); - } - } -} - -// Return indices corresponding to the minimum function values. 
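-// NaN costs sort to the back: the comparator below returns false when function_values[a] is
-// NaN and true when function_values[b] is, so a NaN-valued member is never reported as best.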
-template -std::vector best_indices( std::vector const &function_values ) -{ - const size_t n = function_values.size(); - std::vector indices( n ); - - for ( size_t i = 0; i < n; ++i ) - { - indices[i] = i; - } - - std::sort( indices.begin(), indices.end(), [&]( size_t a, size_t b ) { - if ( FloatUtils::isNan( function_values[a] ) ) - { - return false; - } - if ( FloatUtils::isNan( function_values[b] ) ) - { - return true; - } - return function_values[a] < function_values[b]; - } ); - - return indices; -} - -template -auto weighted_lehmer_mean( RandomAccessContainer const &values, - RandomAccessContainer const &weights ) -{ - if ( values.size() != weights.size() ) - { - std::ostringstream oss; - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": There must be the same number of weights as values, but got " << values.size() - << " values and " << weights.size() << " weights."; - throw std::logic_error( oss.str() ); - } - - if ( values.size() == 0 ) - { - std::ostringstream oss; - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": There must at least one value provided."; - throw std::logic_error( oss.str() ); - } - - using Real = typename RandomAccessContainer::value_type; - Real numerator = 0; - Real denominator = 0; - - for ( size_t i = 0; i < values.size(); ++i ) - { - if ( weights[i] < 0 || !FloatUtils::isFinite( weights[i] ) ) - { - std::ostringstream oss; - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": All weights must be positive and finite, but got received weight " - << weights[i] << " at index " << i << " of " << weights.size() << "."; - throw std::domain_error( oss.str() ); - } - - Real tmp = weights[i] * values[i]; - numerator += tmp * values[i]; - denominator += tmp; - } - return numerator / denominator; -} - -// Storn, R., Price, K. (1997). Differential evolution-a simple and efficient heuristic for global -// optimization over continuous spaces. Journal of global optimization, 11, 341-359. See: -// https://www.cp.eng.chula.ac.th/~prabhas//teaching/ec/ec2012/storn_price_de.pdf - -// We provide the parameters in a struct-there are too many of them and they are too unwieldy to -// pass individually: -template struct differential_evolution_parameters -{ - using Real = typename ArgumentContainer::value_type; - using DimensionlessReal = decltype( Real() / Real() ); - ArgumentContainer lower_bounds; - ArgumentContainer upper_bounds; - - // mutation factor is also called scale factor or just F in the literature: - DimensionlessReal mutation_factor = static_cast( 0.65 ); - DimensionlessReal crossover_probability = static_cast( 0.5 ); - - // Population in each generation: - size_t NP = 500; - size_t max_generations = 1000; - ArgumentContainer const *initial_guess = nullptr; - unsigned threads = std::thread::hardware_concurrency(); -}; - -template -void validate_differential_evolution_parameters( - differential_evolution_parameters const &de_params ) -{ - std::ostringstream oss; - validate_bounds( de_params.lower_bounds, de_params.upper_bounds ); - if ( de_params.NP < 4 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The population size must be at least 4, but requested population size of " - << de_params.NP << "."; - throw std::invalid_argument( oss.str() ); - } - - // From: "Differential Evolution: A Practical Approach to Global Optimization (Natural Computing - // Series)" > The scale factor, F in (0,1+), is a positive real number that controls the rate at - // which the population evolves. 
> While there is no upper limit on F, effective values are - // seldom greater than 1.0. - // ... - // Also see "Limits on F", Section 2.5.1: - // > This discontinuity at F = 1 reduces the number of mutants by half and can result in erratic - // convergence... - auto F = de_params.mutation_factor; - if ( FloatUtils::isNan( F ) || F >= 1 || F <= 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": F in (0, 1) is required, but got F=" << F << "."; - throw std::domain_error( oss.str() ); - } - - if ( de_params.max_generations < 1 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": There must be at least one generation."; - throw std::invalid_argument( oss.str() ); - } - - if ( de_params.initial_guess ) - { - validate_initial_guess( - *de_params.initial_guess, de_params.lower_bounds, de_params.upper_bounds ); - } - - if ( de_params.threads == 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": There must be at least one thread."; - throw std::invalid_argument( oss.str() ); - } -} - -template -ArgumentContainer differential_evolution( - const Func cost_function, - differential_evolution_parameters const &de_params, - URBG &gen, - std::invoke_result_t target_value = - std::numeric_limits>::quiet_NaN(), - std::atomic *cancellation = nullptr, - std::vector>> - *queries = nullptr, - std::atomic> *current_minimum_cost = nullptr ) -{ - using Real = typename ArgumentContainer::value_type; - using DimensionlessReal = decltype( Real() / Real() ); - using ResultType = std::invoke_result_t; - - validate_differential_evolution_parameters( de_params ); - const size_t dimension = de_params.lower_bounds.size(); - auto NP = de_params.NP; - auto population = - random_initial_population( de_params.lower_bounds, de_params.upper_bounds, NP, gen ); - - if ( de_params.initial_guess ) - { - population[0] = *de_params.initial_guess; - } - - std::vector cost( NP, std::numeric_limits::quiet_NaN() ); - std::atomic target_attained = false; - - // This mutex is only used if the queries are stored: - std::mutex mt; - - std::vector thread_pool; - auto const threads = de_params.threads; - for ( size_t j = 0; j < threads; ++j ) - { - // Note that if some members of the population take way longer to compute, - // then this parallelization strategy is very suboptimal. - // However, we tried using std::async (which should be robust to this particular problem), - // but the overhead was just totally unacceptable on ARM Macs (the only platform tested). - // As the economists say "there are no solutions, only tradeoffs". - - thread_pool.emplace_back( [&, j]() { - for ( size_t i = j; i < cost.size(); i += threads ) - { - cost[i] = cost_function( population[i] ); - if ( current_minimum_cost && cost[i] < *current_minimum_cost ) - { - *current_minimum_cost = cost[i]; - } - if ( queries ) - { - std::scoped_lock lock( mt ); - queries->push_back( std::make_pair( population[i], cost[i] ) ); - } - - if ( !FloatUtils::isNan( target_value ) && cost[i] <= target_value ) - { - target_attained = true; - } - } - } ); - } - - for ( auto &thread : thread_pool ) - { - thread.join(); - } - - std::vector trial_vectors( NP ); - for ( size_t i = 0; i < NP; ++i ) - { - if constexpr ( has_resize_v ) - { - trial_vectors[i].resize( dimension ); - } - } - - std::vector thread_generators( threads ); - for ( size_t j = 0; j < threads; ++j ) - { - thread_generators[j].seed( gen() ); - } - - // std::vector isn't threadsafe! 
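-    // (std::vector<bool> packs eight flags into each byte, so two threads writing "different"
-    // elements can race on the same byte. Giving every index its own integer flag keeps the
-    // concurrent per-index writes in the worker threads below safe.)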
- std::vector updated_indices( NP, 0 ); - - for ( size_t generation = 0; generation < de_params.max_generations; ++generation ) - { - if ( cancellation && *cancellation ) - { - break; - } - if ( target_attained ) - { - break; - } - - thread_pool.resize( 0 ); - for ( size_t j = 0; j < threads; ++j ) - { - thread_pool.emplace_back( [&, j]() { - auto &tlg = thread_generators[j]; - std::uniform_real_distribution unif01( DimensionlessReal( 0 ), - DimensionlessReal( 1 ) ); - - for ( size_t i = j; i < cost.size(); i += threads ) - { - if ( target_attained ) - { - return; - } - if ( cancellation && *cancellation ) - { - return; - } - - size_t r1, r2, r3; - do - { - r1 = tlg() % NP; - } - while ( r1 == i ); - - do - { - r2 = tlg() % NP; - } - while ( r2 == i || r2 == r1 ); - - do - { - r3 = tlg() % NP; - } - while ( r3 == i || r3 == r2 || r3 == r1 ); - - - for ( size_t k = 0; k < dimension; ++k ) - { - // See equation (4) of the reference: - auto guaranteed_changed_idx = tlg() % dimension; - if ( unif01( tlg ) < de_params.crossover_probability || - k == guaranteed_changed_idx ) - { - auto tmp = - population[r1][k] + de_params.mutation_factor * - ( population[r2][k] - population[r3][k] ); - auto const &lb = de_params.lower_bounds[k]; - auto const &ub = de_params.upper_bounds[k]; - // Some others recommend regenerating the indices rather than clamping; - // I dunno seems like it could get stuck regenerating . . . - trial_vectors[i][k] = std::clamp( tmp, lb, ub ); - } - else - { - trial_vectors[i][k] = population[i][k]; - } - } - - auto const trial_cost = cost_function( trial_vectors[i] ); - if ( FloatUtils::isNan( trial_cost ) ) - { - continue; - } - if ( queries ) - { - std::scoped_lock lock( mt ); - queries->push_back( std::make_pair( trial_vectors[i], trial_cost ) ); - } - - if ( trial_cost < cost[i] || FloatUtils::isNan( cost[i] ) ) - { - cost[i] = trial_cost; - if ( !FloatUtils::isNan( target_value ) && cost[i] <= target_value ) - { - target_attained = true; - } - - if ( current_minimum_cost && cost[i] < *current_minimum_cost ) - { - *current_minimum_cost = cost[i]; - } - - // Can't do this! It's a race condition! - // population[i] = trial_vectors[i]; - // Instead mark all the indices that need to be updated: - updated_indices[i] = 1; - } - } - } ); - } - for ( auto &thread : thread_pool ) - { - thread.join(); - } - - for ( size_t i = 0; i < NP; ++i ) - { - if ( updated_indices[i] ) - { - population[i] = trial_vectors[i]; - updated_indices[i] = 0; - } - } - } - - auto it = std::min_element( cost.begin(), cost.end() ); - return population[std::distance( cost.begin(), it )]; -} - -} // namespace NLR -#endif From 5a3a72a8a34902fc74b1c8b6c78c36fc2a7092de Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Mon, 3 Feb 2025 13:25:53 +0200 Subject: [PATCH 49/81] Delete src/nlr/GradientDescent.h --- src/nlr/GradientDescent.h | 353 -------------------------------------- 1 file changed, 353 deletions(-) delete mode 100644 src/nlr/GradientDescent.h diff --git a/src/nlr/GradientDescent.h b/src/nlr/GradientDescent.h deleted file mode 100644 index f582711397..0000000000 --- a/src/nlr/GradientDescent.h +++ /dev/null @@ -1,353 +0,0 @@ -/* - * Copyright Nick Thompson, 2024 - * Use, modification and distribution are subject to the - * Boost Software License, Version 1.0. 
(See accompanying file - * LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) - */ -#ifndef GRADIENT_DESCENT -#define GRADIENT_DESCENT - -#include "FloatUtils.h" -#include "GlobalConfiguration.h" - -#include // for std::sort -#include -#include -#include -#include -#include -#include -#include -#include // for std::false_type -#include -#include - -namespace NLR { - -template struct has_resize : std::false_type -{ -}; - -template -struct has_resize().resize( size_t{} ) )>> : std::true_type -{ -}; - -template constexpr bool has_resize_v = has_resize::value; - -template -void validate_bounds( ArgumentContainer const &lower_bounds, ArgumentContainer const &upper_bounds ) -{ - std::ostringstream oss; - - if ( lower_bounds.size() == 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The dimension of the problem cannot be zero."; - throw std::domain_error( oss.str() ); - } - - if ( upper_bounds.size() != lower_bounds.size() ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": There must be the same number of lower bounds as upper bounds, but given "; - oss << upper_bounds.size() << " upper bounds, and " << lower_bounds.size() - << " lower bounds."; - throw std::domain_error( oss.str() ); - } - - for ( size_t i = 0; i < lower_bounds.size(); ++i ) - { - auto lb = lower_bounds[i]; - auto ub = upper_bounds[i]; - if ( lb > ub ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The upper bound must be greater than or equal to the lower bound, but the " - "upper bound is " - << ub << " and the lower is " << lb << "."; - throw std::domain_error( oss.str() ); - } - - if ( !FloatUtils::isFinite( lb ) ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The lower bound must be finite, but got " << lb << "."; - oss << " For infinite bounds, emulate with std::numeric_limits::lower() or use a " - "standard infinite->finite " - "transform."; - throw std::domain_error( oss.str() ); - } - - if ( !FloatUtils::isFinite( ub ) ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The upper bound must be finite, but got " << ub << "."; - oss << " For infinite bounds, emulate with std::numeric_limits::max() or use a " - "standard infinite->finite " - "transform."; - throw std::domain_error( oss.str() ); - } - } -} - -template -std::vector random_initial_population( ArgumentContainer const &lower_bounds, - ArgumentContainer const &upper_bounds, - size_t initial_population_size, - URBG &&gen ) -{ - using Real = typename ArgumentContainer::value_type; - using DimensionlessReal = decltype( Real() / Real() ); - constexpr bool has_resize = has_resize_v; - std::vector population( initial_population_size ); - auto const dimension = lower_bounds.size(); - - for ( size_t i = 0; i < population.size(); ++i ) - { - if constexpr ( has_resize ) - population[i].resize( dimension ); - else - { - // Argument type must be known at compile-time; like std::array: - if ( population[i].size() != dimension ) - { - std::ostringstream oss; - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": For containers which do not have resize, the default size must be the " - "same as the dimension, "; - oss << "but the default container size is " << population[i].size() - << " and the dimension of the problem is " << dimension << "."; - oss << " The function argument container type is " - << typeid( ArgumentContainer ).name() << ".\n"; - throw std::runtime_error( oss.str() ); - } - } - } - - // Why don't we 
provide an option to initialize with (say) a Gaussian distribution? - // > If the optimum's location is fairly well known, - // > a Gaussian distribution may prove somewhat faster, although it - // > may also increase the probability that the population will converge prematurely. - // > In general, uniform distributions are preferred, since they best reflect - // > the lack of knowledge about the optimum's location. - // - Differential Evolution: A Practical Approach to Global Optimization - // That said, scipy uses Latin Hypercube sampling and says self-avoiding sequences are - // preferable. So this is something that could be investigated and potentially improved. - std::uniform_real_distribution dis( DimensionlessReal( 0 ), - DimensionlessReal( 1 ) ); - for ( size_t i = 0; i < population.size(); ++i ) - { - for ( size_t j = 0; j < dimension; ++j ) - { - auto const &lb = lower_bounds[j]; - auto const &ub = upper_bounds[j]; - population[i][j] = lb + dis( gen ) * ( ub - lb ); - } - } - - return population; -} - -template -void validate_initial_guess( ArgumentContainer const &initial_guess, - ArgumentContainer const &lower_bounds, - ArgumentContainer const &upper_bounds ) -{ - std::ostringstream oss; - auto const dimension = lower_bounds.size(); - - if ( initial_guess.size() != dimension ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The initial guess must have the same dimensions as the problem,"; - oss << ", but the problem size is " << dimension << " and the initial guess has " - << initial_guess.size() << " elements."; - throw std::domain_error( oss.str() ); - } - - for ( size_t i = 0; i < dimension; ++i ) - { - auto lb = lower_bounds[i]; - auto ub = upper_bounds[i]; - - if ( !FloatUtils::isFinite( initial_guess[i] ) ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": At index " << i << ", the initial guess is " << initial_guess[i] - << ", make sure all elements of the initial guess are finite."; - throw std::domain_error( oss.str() ); - } - - if ( initial_guess[i] < lb || initial_guess[i] > ub ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": At index " << i << " the initial guess " << initial_guess[i] - << " is not in the bounds [" << lb << ", " << ub << "]."; - throw std::domain_error( oss.str() ); - } - } -} - -// Single-Threaded Gradient Descent - -// We provide the parameters in a struct-there are too many of them and they are too unwieldy to -// pass individually: -template struct gradient_descent_parameters -{ - using Real = typename ArgumentContainer::value_type; - ArgumentContainer lower_bounds; - ArgumentContainer upper_bounds; - - Real step_size = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; - size_t max_iterations = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS; - Real epsilon = GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS; - bool maximize = false; - Real weight_decay = 0; - Real lr = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE; - ArgumentContainer const *initial_guess = nullptr; -}; - -template -void validate_gradient_descent_parameters( - gradient_descent_parameters const &cd_params ) -{ - std::ostringstream oss; - validate_bounds( cd_params.lower_bounds, cd_params.upper_bounds ); - - if ( FloatUtils::isNan( cd_params.step_size ) || cd_params.step_size <= 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": step_size > 0 is required, but got step_size=" << cd_params.step_size << "."; - throw 
std::domain_error( oss.str() ); - } - - if ( FloatUtils::isNan( cd_params.epsilon ) || cd_params.epsilon <= 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": epsilon > 0 is required, but got epsilon=" << cd_params.epsilon << "."; - throw std::domain_error( oss.str() ); - } - - if ( cd_params.max_iterations < 1 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": There must be at least one generation."; - throw std::invalid_argument( oss.str() ); - } - - if ( FloatUtils::isNan( cd_params.lr ) || cd_params.lr <= 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": lr > 0 is required, but got lr=" << cd_params.lr << "."; - throw std::domain_error( oss.str() ); - } - - if ( FloatUtils::isNan( cd_params.weight_decay ) || cd_params.weight_decay < 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": weight_decay >= 0 is required, but got weight_decay=" << cd_params.weight_decay - << "."; - throw std::domain_error( oss.str() ); - } - - if ( cd_params.initial_guess ) - { - validate_initial_guess( - *cd_params.initial_guess, cd_params.lower_bounds, cd_params.upper_bounds ); - } -} - -template -ArgumentContainer gradient_descent( - const Func cost_function, - gradient_descent_parameters const &cd_params, - URBG &gen, - std::invoke_result_t target_value, - bool *cancellation = nullptr, - std::vector>> - *queries = nullptr, - std::invoke_result_t *current_minimum_cost = nullptr ) -{ - using ResultType = std::invoke_result_t; - - validate_gradient_descent_parameters( cd_params ); - const size_t dimension = cd_params.lower_bounds.size(); - auto step_size = cd_params.step_size; - auto max_iterations = cd_params.max_iterations; - auto maximize = cd_params.maximize; - auto epsilon = cd_params.epsilon; - auto lr = cd_params.lr; - auto weight_decay = cd_params.weight_decay; - auto lower_bounds = cd_params.lower_bounds; - auto upper_bounds = cd_params.upper_bounds; - auto guess = - random_initial_population( cd_params.lower_bounds, cd_params.upper_bounds, 1, gen ); - - if ( cd_params.initial_guess ) - { - guess[0] = *cd_params.initial_guess; - } - - std::vector candidates( dimension ); - std::vector gradient( dimension ); - ArgumentContainer current_minimum = guess[0]; - - for ( size_t i = 0; i < max_iterations; ++i ) - { - double current_cost = cost_function( guess[0] ); - for ( size_t j = 0; j < dimension; ++j ) - { - candidates[j] = guess[0]; - candidates[j][j] += step_size; - - if ( candidates[j][j] > upper_bounds[j] || candidates[j][j] < lower_bounds[j] ) - { - gradient[j] = 0; - continue; - } - - size_t sign = ( maximize == false ? 
1 : -1 ); - double cost = cost_function( candidates[j] ); - gradient[j] = sign * ( cost - current_cost ) / step_size + weight_decay * guess[0][j]; - if ( !FloatUtils::isNan( target_value ) && cost <= target_value ) - { - guess[0] = candidates[j]; - break; - } - } - - bool gradient_is_zero = true; - for ( size_t j = 0; j < dimension; ++j ) - { - if ( FloatUtils::abs( gradient[j] ) > epsilon ) - { - gradient_is_zero = false; - } - } - if ( gradient_is_zero ) - { - break; - } - - for ( size_t j = 0; j < dimension; ++j ) - { - guess[0][j] -= lr * gradient[j]; - - if ( guess[0][j] > upper_bounds[j] ) - { - guess[0][j] = upper_bounds[j]; - } - - if ( guess[0][j] < lower_bounds[j] ) - { - guess[0][j] = lower_bounds[j]; - } - } - } - - return guess[0]; -} - -} // namespace NLR -#endif From b093daca8b7b74135e77c683a1afc253ca04918c Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Mon, 3 Feb 2025 13:27:01 +0200 Subject: [PATCH 50/81] Delete src/nlr/MILPSolverBoundTighteningType.h --- src/nlr/MILPSolverBoundTighteningType.h | 46 ------------------------- 1 file changed, 46 deletions(-) delete mode 100644 src/nlr/MILPSolverBoundTighteningType.h diff --git a/src/nlr/MILPSolverBoundTighteningType.h b/src/nlr/MILPSolverBoundTighteningType.h deleted file mode 100644 index 760f82e8d1..0000000000 --- a/src/nlr/MILPSolverBoundTighteningType.h +++ /dev/null @@ -1,46 +0,0 @@ -/********************* */ -/*! \file MILPSolverBoundTighteningType.h -** \verbatim -** Top contributors (to current version): -** Guy Katz -** This file is part of the Marabou project. -** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS -** in the top-level source directory) and their institutional affiliations. -** All rights reserved. See the file COPYING in the top-level source -** directory for licensing information.\endverbatim -** -** [[ Add lengthier description here ]] - -**/ - -#ifndef __MILPSolverBoundTighteningType_h__ -#define __MILPSolverBoundTighteningType_h__ - -/* - MILP solver bound tightening options -*/ -enum class MILPSolverBoundTighteningType { - // Only encode pure linear constraints in the underlying - // solver, in a way that over-approximates the query - LP_RELAXATION = 0, - LP_RELAXATION_INCREMENTAL = 1, - // Encode linear and integer constraints in the underlying - // solver, in a way that completely captures the query but is - // more expensive to solve - MILP_ENCODING = 2, - MILP_ENCODING_INCREMENTAL = 3, - // Encode full queries and tries to fix relus until fix point - ITERATIVE_PROPAGATION = 4, - // Perform backward analysis - BACKWARD_ANALYSIS_ONCE = 5, - BACKWARD_ANALYSIS_CONVERGE = 6, - // Perform backward analysis using the INVPROP Algorithm (arXiv:2302.01404v4 [cs.LG]) - BACKWARD_ANALYSIS_INVPROP = 7, - // Perform backward analysis using the PreimageApproximation Algorithm (arXiv:2305.03686v4 - // [cs.SE]) - BACKWARD_ANALYSIS_PREIMAGE_APPROX = 8, - // Option to have no MILP bound tightening performed - NONE = 10, -}; - -#endif // __MILPSolverBoundTighteningType_h__ From bf8513bdeec1682f7f30f4a451e34aa17907b66d Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Mon, 3 Feb 2025 13:31:02 +0200 Subject: [PATCH 51/81] Add files via upload --- src/nlr/LPFormulator.cpp | 180 +--------------------- src/nlr/LPFormulator.h | 19 +-- src/nlr/Layer.cpp | 256 +++++++++++++++++++++++++++++-- src/nlr/Layer.h | 21 ++- src/nlr/NetworkLevelReasoner.cpp | 13 +- 5 files changed, 268 insertions(+), 221 deletions(-) diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp index c9ff9eb6d6..4f5749c9e7 
100644 --- a/src/nlr/LPFormulator.cpp +++ b/src/nlr/LPFormulator.cpp @@ -337,120 +337,13 @@ void LPFormulator::optimizeBoundsWithLpRelaxation( const Map<unsigned, Layer *> throw InfeasibleQueryException(); } -void LPFormulator::optimizeBoundsWithInvprop( const Map<unsigned, Layer *> &layers ) +void LPFormulator::optimizeBoundsWithPreimageApproximation( Map<unsigned, Layer *> &layers ) { -} - -void LPFormulator::optimizeBoundsWithPreimageApproximation( const Map<unsigned, Layer *> &layers ) -{ - // Define EstimateVolume bind which captures layers and only receives coeffs as its input. - auto EstimateVolumeBind = std::bind( EstimateVolume, layers, std::placeholders::_1 ); - - // Search over coeffs in [0, 1]^number_of_parameters. Enforce single-threaded optimization. - auto opt_params = gradient_descent_parameters<std::vector<double>>(); - unsigned num = getNumberOfParameters( layers ); - opt_params.lower_bounds.resize( num, 0 ); - opt_params.upper_bounds.resize( num, 1 ); - double value_to_reach = 0; - std::mt19937_64 rng( GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED ); - - // Find optimal coeffs and run final parameterised symbolic bound tightening + backward LP - // relaxation. - auto optimal_coeffs = gradient_descent( EstimateVolumeBind, opt_params, rng, value_to_reach ); - - // First, run parameterised symbolic bound propagation. - unsigned i = 0; - Map<unsigned, std::vector<double>> layerIndicesToParameters = - getParametersForLayers( layers, optimal_coeffs ); - for ( auto pair : layers ) - { - unsigned layerIndex = pair.first; - Layer *layer = layers[layerIndex]; - std::vector<double> currentLayerCoeffs = layerIndicesToParameters[layerIndex]; - if ( i == layers.size() - 1 ) - { - layer->computeParameterisedSymbolicBounds( currentLayerCoeffs, true, false ); - } - else - { - layer->computeParameterisedSymbolicBounds( currentLayerCoeffs ); - } - i++; - } - - // Finally, run parameterised forward LP and parameterised backward LP. + std::vector<double> optimal_coeffs = layers[0]->OptimalParameterisedSymbolicBoundTightening(); optimizeBoundsWithLpRelaxation( layers, false, optimal_coeffs ); optimizeBoundsWithLpRelaxation( layers, true, optimal_coeffs ); } -double LPFormulator::EstimateVolume( const Map<unsigned, Layer *> &layers, - std::vector<double> coeffs ) -{ - // First, run parameterised symbolic bound propagation. - Map<unsigned, std::vector<double>> layerIndicesToParameters = - getParametersForLayers( layers, coeffs ); - for ( auto pair : layers ) - { - unsigned layerIndex = pair.first; - Layer *layer = layers[layerIndex]; - std::vector<double> currentLayerCoeffs = layerIndicesToParameters[layerIndex]; - layer->computeParameterisedSymbolicBounds( currentLayerCoeffs ); - } - - std::mt19937_64 rng( GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED ); - double log_box_volume = 0; - double sigmoid_sum = 0; - - unsigned inputLayerIndex = 0; - unsigned outputLayerIndex = layers.size() - 1; - Layer *inputLayer = layers[inputLayerIndex]; - Layer *outputLayer = layers[outputLayerIndex]; - - // Calculate volume of input variables' bounding box. - for ( unsigned index = 0; index < inputLayer->getSize(); ++index ) - { - if ( inputLayer->neuronEliminated( index ) ) - continue; - - double lb = inputLayer->getLb( index ); - double ub = inputLayer->getUb( index ); - - log_box_volume += std::log( ub - lb ); - } - - for ( unsigned i = 0; i < GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; ++i ) - { - // Sample input point from known bounds.
- Map<NeuronIndex, double> point; - for ( unsigned index = 0; index < inputLayer->getSize(); ++index ) - { - if ( inputLayer->neuronEliminated( index ) ) - continue; - - NeuronIndex neuron( inputLayerIndex, index ); - double lb = inputLayer->getLb( index ); - double ub = inputLayer->getUb( index ); - std::uniform_real_distribution<> dis( lb, ub ); - point.insert( neuron, dis( rng ) ); - } - - // Calculate sigmoid of maximum margin from output symbolic bounds. - double max_margin = 0; - for ( unsigned index = 0; index < outputLayer->getSize(); ++index ) - { - if ( outputLayer->neuronEliminated( index ) ) - continue; - - double margin = outputLayer->calculateDifferenceFromSymbolic( point, index ); - max_margin = std::max( max_margin, margin ); - } - sigmoid_sum += SigmoidConstraint::sigmoid( max_margin ); - } - - return std::exp( log_box_volume + std::log( sigmoid_sum ) ) / - GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; -} - void LPFormulator::optimizeBoundsOfOneLayerWithLpRelaxation( const Map<unsigned, Layer *> &layers, unsigned targetIndex ) { @@ -777,7 +670,7 @@ void LPFormulator::createLPRelaxation( const Map<unsigned, Layer *> &layers, else { Map<unsigned, std::vector<double>> layerIndicesToParameters = - getParametersForLayers( layers, coeffs ); + layer.second->getParametersForLayers( layers, coeffs ); std::vector<double> currentLayerCoeffs = layerIndicesToParameters[currentLayerIndex]; addLayerToParameterisedModel( gurobi, layer.second, false, currentLayerCoeffs ); } @@ -810,7 +703,7 @@ void LPFormulator::createLPRelaxationAfter( const Map<unsigned, Layer *> &layers else { Map<unsigned, std::vector<double>> layerIndicesToParameters = - getParametersForLayers( layers, coeffs ); + currentLayer->getParametersForLayers( layers, coeffs ); std::vector<double> currentLayerCoeffs = layerIndicesToParameters[currentLayerIndex]; addLayerToParameterisedModel( gurobi, currentLayer, true, currentLayerCoeffs ); @@ -1827,71 +1720,6 @@ void LPFormulator::addLeakyReluLayerToLpRelaxation( GurobiWrapper &gurobi, } } -unsigned LPFormulator::getNumberOfParameters( const Map<unsigned, Layer *> &layers ) -{ - unsigned index = 0; - for ( auto pair : layers ) - { - unsigned layerIndex = pair.first; - Layer *layer = layers[layerIndex]; - switch ( layer->getLayerType() ) - { - case Layer::RELU: - case Layer::LEAKY_RELU: - index++; - break; - - case Layer::SIGN: - case Layer::BILINEAR: - index += 2; - break; - - default: - break; - } - } - return index; -} - -Map<unsigned, std::vector<double>> -LPFormulator::getParametersForLayers( const Map<unsigned, Layer *> &layers, - std::vector<double> coeffs ) -{ - unsigned index = 0; - Map<unsigned, std::vector<double>> layerIndicesToParameters; - for ( auto pair : layers ) - { - unsigned layerIndex = pair.first; - Layer *layer = layers[layerIndex]; - std::vector<double> parameters = {}; - switch ( layer->getLayerType() ) - { - case Layer::RELU: - case Layer::LEAKY_RELU: - { - parameters = std::vector<double>( coeffs.begin() + index, coeffs.begin() + index + 1 ); - layerIndicesToParameters.insert( layerIndex, parameters ); - index++; - } - break; - - case Layer::SIGN: - case Layer::BILINEAR: - { - parameters = std::vector<double>( coeffs.begin() + index, coeffs.begin() + index + 2 ); - layerIndicesToParameters.insert( layerIndex, parameters ); - index += 2; - } - break; - - default: - layerIndicesToParameters.insert( layerIndex, parameters ); - break; - } - } - return layerIndicesToParameters; -} - void LPFormulator::addLayerToParameterisedModel( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, diff --git a/src/nlr/LPFormulator.h b/src/nlr/LPFormulator.h index c9164f0011..d89af4c331 100644 --- a/src/nlr/LPFormulator.h +++ b/src/nlr/LPFormulator.h @@ -16,7 +16,6 @@ #ifndef __LPFormulator_h__ #define __LPFormulator_h__ -#include
"GradientDescent.h" #include "GurobiWrapper.h" #include "LayerOwner.h" #include "Map.h" @@ -56,8 +55,7 @@ class LPFormulator : public ParallelSolver void optimizeBoundsWithLpRelaxation( const Map &layers, bool backward = false, std::vector coeffs = {} ); - void optimizeBoundsWithInvprop( const Map &layers ); - void optimizeBoundsWithPreimageApproximation( const Map &layers ); + void optimizeBoundsWithPreimageApproximation( Map &layers ); void optimizeBoundsOfOneLayerWithLpRelaxation( const Map &layers, unsigned targetIndex ); void optimizeBoundsWithIncrementalLpRelaxation( const Map &layers ); @@ -91,16 +89,6 @@ class LPFormulator : public ParallelSolver void addLayerToModel( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ); - - // Get total number of optimizable parameters for parameterised SBT/LP relaxation. - unsigned getNumberOfParameters( const Map &layers ); - - - // Get map containing vector of optimizable parameters for parameterised SBT/LP relaxation for - // every layer index. - static Map> - getParametersForLayers( const Map &layers, std::vector coeffs ); - private: LayerOwner *_layerOwner; bool _cutoffInUse; @@ -176,11 +164,6 @@ class LPFormulator : public ParallelSolver std::vector coeffs ); - // Estimate Volume of parameterised LP relaxations. - static double EstimateVolume( const Map &layers, - std::vector coeffs ); - - /* Optimize for the min/max value of variableName with respect to the constraints encoded in gurobi. If the query is infeasible, *infeasible is set to true. diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index 0ebb7872df..e796eda2d0 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -75,7 +75,9 @@ void Layer::allocateMemory() _inputLayerSize = ( _type == INPUT ) ? _size : _layerOwner->getLayer( 0 )->getSize(); if ( Options::get()->getSymbolicBoundTighteningType() == - SymbolicBoundTighteningType::SYMBOLIC_BOUND_TIGHTENING ) + SymbolicBoundTighteningType::SYMBOLIC_BOUND_TIGHTENING || + Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX ) { _symbolicLb = new double[_size * _inputLayerSize]; _symbolicUb = new double[_size * _inputLayerSize]; @@ -3392,21 +3394,6 @@ void Layer::computeSymbolicBoundsForWeightedSum() } } -double Layer::calculateDifferenceFromSymbolic( Map &point, unsigned i ) const -{ - double lowerSum = _symbolicLowerBias[i]; - double upperSum = _symbolicUpperBias[i]; - - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - NeuronIndex neuron( 0, j ); - lowerSum += _symbolicLb[j * _size + i] * point[neuron]; - upperSum += _symbolicUb[j * _size + i] * point[neuron]; - } - - return std::max( _ub[i] - upperSum, lowerSum - _lb[i] ); -} - void Layer::computeParameterisedSymbolicBounds( std::vector coeffs, bool receivePolygonal, bool receive ) @@ -4335,6 +4322,243 @@ void Layer::computeParameterisedSymbolicBoundsForBilinear( std::vector c } } +std::vector Layer::OptimalParameterisedSymbolicBoundTightening() +{ + const Map &layers = _layerOwner->getLayerIndexToLayer(); + + // Search over coeffs in [0, 1]^number_of_parameters with projected grdient descent. 
+ unsigned max_iterations = + GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS; + double step_size = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; + double epsilon = GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS; + double weight_decay = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_WEIGHT_DECAY; + double lr = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE; + bool maximize = false; + + unsigned dimension = getNumberOfParameters( layers ); + std::vector<double> lower_bounds; + std::vector<double> upper_bounds; + std::vector<double> guess; + lower_bounds.resize( dimension, 0 ); + upper_bounds.resize( dimension, 1 ); + guess.resize( dimension, 0 ); + + // Initialize initial guess uniformly. + std::mt19937_64 rng( GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED ); + std::uniform_real_distribution<double> dis( 0, 1 ); + for ( size_t j = 0; j < dimension; ++j ) + { + double lb = lower_bounds[j]; + double ub = upper_bounds[j]; + guess[j] = lb + dis( rng ) * ( ub - lb ); + } + + std::vector<std::vector<double>> candidates( dimension ); + std::vector<double> gradient( dimension ); + std::vector<double> current_minimum = guess; + + for ( size_t i = 0; i < max_iterations; ++i ) + { + double current_cost = EstimateVolume( guess ); + for ( size_t j = 0; j < dimension; ++j ) + { + candidates[j] = guess; + candidates[j][j] += step_size; + + if ( candidates[j][j] > upper_bounds[j] || candidates[j][j] < lower_bounds[j] ) + { + gradient[j] = 0; + continue; + } + + double sign = ( maximize == false ? 1 : -1 ); + double cost = EstimateVolume( candidates[j] ); + gradient[j] = sign * ( cost - current_cost ) / step_size + weight_decay * guess[j]; + } + + bool gradient_is_zero = true; + for ( size_t j = 0; j < dimension; ++j ) + { + if ( FloatUtils::abs( gradient[j] ) > epsilon ) + { + gradient_is_zero = false; + } + } + if ( gradient_is_zero ) + { + break; + } + + for ( size_t j = 0; j < dimension; ++j ) + { + guess[j] -= lr * gradient[j]; + + if ( guess[j] > upper_bounds[j] ) + { + guess[j] = upper_bounds[j]; + } + + if ( guess[j] < lower_bounds[j] ) + { + guess[j] = lower_bounds[j]; + } + } + } + + return guess; +} + +double Layer::EstimateVolume( std::vector<double> coeffs ) +{ + // First, run parameterised symbolic bound propagation. + const Map<unsigned, Layer *> &layers = _layerOwner->getLayerIndexToLayer(); + Map<unsigned, std::vector<double>> layerIndicesToParameters = + getParametersForLayers( layers, coeffs ); + for ( unsigned i = 0; i < layers.size(); ++i ) + { + ASSERT( layers.exists( i ) ); + std::vector<double> currentLayerCoeffs = layerIndicesToParameters[i]; + layers[i]->computeParameterisedSymbolicBounds( currentLayerCoeffs ); + } + + std::mt19937_64 rng( GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED ); + double log_box_volume = 0; + double sigmoid_sum = 0; + + unsigned inputLayerIndex = 0; + unsigned outputLayerIndex = layers.size() - 1; + Layer *inputLayer = layers[inputLayerIndex]; + Layer *outputLayer = layers[outputLayerIndex]; + + // Calculate volume of input variables' bounding box. + for ( unsigned index = 0; index < inputLayer->getSize(); ++index ) + { + if ( inputLayer->neuronEliminated( index ) ) + continue; + + double lb = inputLayer->getLb( index ); + double ub = inputLayer->getUb( index ); + + if ( lb == ub ) + return 0; + + log_box_volume += std::log( ub - lb ); + } + + for ( unsigned i = 0; i < GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; ++i ) + { + // Sample input point from known bounds.
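+ // Each sampled point below is scored by the sigmoid of its maximum margin from the + // output layer's symbolic bounds, so the value returned at the end approximates + // box_volume * ( 1 / N ) * sum_i sigmoid( max_margin( point_i ) ), + // with N = GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS: a smooth Monte-Carlo + // proxy for the volume enclosed by the parameterised bounds.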
+ Map<unsigned, double> point; + for ( unsigned j = 0; j < inputLayer->getSize(); ++j ) + { + if ( inputLayer->neuronEliminated( j ) ) + { + point.insert( j, 0 ); + } + else + { + double lb = inputLayer->getLb( j ); + double ub = inputLayer->getUb( j ); + std::uniform_real_distribution<> dis( lb, ub ); + point.insert( j, dis( rng ) ); + } + } + + // Calculate sigmoid of maximum margin from output symbolic bounds. + double max_margin = 0; + for ( unsigned j = 0; j < outputLayer->getSize(); ++j ) + { + if ( outputLayer->neuronEliminated( j ) ) + continue; + + double margin = outputLayer->calculateDifferenceFromSymbolic( point, j ); + max_margin = std::max( max_margin, margin ); + } + sigmoid_sum += SigmoidConstraint::sigmoid( max_margin ); + } + + return std::exp( log_box_volume + std::log( sigmoid_sum ) ) / + GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; +} + +double Layer::calculateDifferenceFromSymbolic( Map<unsigned, double> &point, unsigned i ) const +{ + double lowerSum = _symbolicLowerBias[i]; + double upperSum = _symbolicUpperBias[i]; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + lowerSum += _symbolicLb[j * _size + i] * point[j]; + upperSum += _symbolicUb[j * _size + i] * point[j]; + } + + return std::max( _ub[i] - upperSum, lowerSum - _lb[i] ); +} + +unsigned Layer::getNumberOfParameters( const Map<unsigned, Layer *> &layers ) +{ + unsigned index = 0; + for ( auto pair : layers ) + { + unsigned layerIndex = pair.first; + Layer *layer = layers[layerIndex]; + switch ( layer->getLayerType() ) + { + case Layer::RELU: + case Layer::LEAKY_RELU: + index++; + break; + + case Layer::SIGN: + case Layer::BILINEAR: + index += 2; + break; + + default: + break; + } + } + return index; +} + +Map<unsigned, std::vector<double>> +Layer::getParametersForLayers( const Map<unsigned, Layer *> &layers, std::vector<double> coeffs ) +{ + unsigned index = 0; + Map<unsigned, std::vector<double>> layerIndicesToParameters; + for ( auto pair : layers ) + { + unsigned layerIndex = pair.first; + Layer *layer = layers[layerIndex]; + std::vector<double> parameters = {}; + switch ( layer->getLayerType() ) + { + case Layer::RELU: + case Layer::LEAKY_RELU: + { + parameters = std::vector<double>( coeffs.begin() + index, coeffs.begin() + index + 1 ); + layerIndicesToParameters.insert( layerIndex, parameters ); + index++; + } + break; + + case Layer::SIGN: + case Layer::BILINEAR: + { + parameters = std::vector<double>( coeffs.begin() + index, coeffs.begin() + index + 2 ); + layerIndicesToParameters.insert( layerIndex, parameters ); + index += 2; + } + break; + + default: + layerIndicesToParameters.insert( layerIndex, parameters ); + break; + } + } + return layerIndicesToParameters; +} + double Layer::softmaxLSELowerBound( const Vector<double> &inputs, const Vector<double> &inputLbs, const Vector<double> &inputUbs, diff --git a/src/nlr/Layer.h b/src/nlr/Layer.h index fef75a1f8a..3b94bec089 100644 --- a/src/nlr/Layer.h +++ b/src/nlr/Layer.h @@ -136,12 +136,22 @@ class Layer void obtainCurrentBounds( const Query &inputQuery ); void obtainCurrentBounds(); + void computeIntervalArithmeticBounds(); void computeSymbolicBounds(); void computeParameterisedSymbolicBounds( std::vector<double> coeffs, bool receivePolygonal = false, bool receive = false ); - double calculateDifferenceFromSymbolic( Map<NeuronIndex, double> &point, unsigned i ) const; - void computeIntervalArithmeticBounds(); + + // Get total number of optimizable parameters for parameterised SBT relaxation. + unsigned getNumberOfParameters( const Map<unsigned, Layer *> &layers ); + + // Get map containing vector of optimizable parameters for parameterised SBT relaxation for + // every layer index.
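+ // (Illustrative example, not taken from any specific network: for layers + // { 0: INPUT, 1: WEIGHTED_SUM, 2: RELU, 3: WEIGHTED_SUM, 4: SIGN }, a coeffs vector + // { c0, c1, c2 } is split as { 0: [], 1: [], 2: [ c0 ], 3: [], 4: [ c1, c2 ] }, since + // ReLU/LeakyReLU layers consume one parameter each, Sign/Bilinear layers consume two, + // and all other layers consume none.)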
+ Map<unsigned, std::vector<double>> getParametersForLayers( const Map<unsigned, Layer *> &layers, + std::vector<double> coeffs ); + + // Return optimizable parameters which minimize parameterised SBT bounds' volume. + std::vector<double> OptimalParameterisedSymbolicBoundTightening(); /* Preprocessing functionality: variable elimination and reindexing @@ -305,6 +315,13 @@ class Layer void computeParameterisedSymbolicBoundsForLeakyRelu( std::vector<double> coeffs, bool receive ); void computeParameterisedSymbolicBoundsForBilinear( std::vector<double> coeffs, bool receive ); + // Estimate volume of parameterised symbolic bound tightening. + double EstimateVolume( std::vector<double> coeffs ); + + // Return the gap between the given point and the upper and lower bounds determined by the + // parameterised SBT relaxation. + double calculateDifferenceFromSymbolic( Map<unsigned, double> &point, unsigned i ) const; /* Helper functions for interval bound tightening */ diff --git a/src/nlr/NetworkLevelReasoner.cpp b/src/nlr/NetworkLevelReasoner.cpp index 8b82ae140b..4cf1371b98 100644 --- a/src/nlr/NetworkLevelReasoner.cpp +++ b/src/nlr/NetworkLevelReasoner.cpp @@ -224,13 +224,11 @@ void NetworkLevelReasoner::symbolicBoundPropagation() void NetworkLevelReasoner::parameterisedSymbolicBoundPropagation( std::vector<double> coeffs ) { Map<unsigned, std::vector<double>> layerIndicesToParameters = - LPFormulator::getParametersForLayers( _layerIndexToLayer, coeffs ); - for ( auto pair : _layerIndexToLayer ) + _layerIndexToLayer[0]->getParametersForLayers( _layerIndexToLayer, coeffs ); + for ( unsigned i = 0; i < _layerIndexToLayer.size(); ++i ) { - unsigned layerIndex = pair.first; - Layer *layer = _layerIndexToLayer[layerIndex]; - std::vector<double> currentLayerCoeffs = layerIndicesToParameters[layerIndex]; - layer->computeParameterisedSymbolicBounds( currentLayerCoeffs ); + std::vector<double> currentLayerCoeffs = layerIndicesToParameters[i]; + _layerIndexToLayer[i]->computeParameterisedSymbolicBounds( currentLayerCoeffs ); } } @@ -251,9 +249,6 @@ void NetworkLevelReasoner::lpRelaxationPropagation() Options::get()->getMILPSolverBoundTighteningType() == MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE ) lpFormulator.optimizeBoundsWithLpRelaxation( _layerIndexToLayer, true ); - else if ( Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP ) - lpFormulator.optimizeBoundsWithInvprop( _layerIndexToLayer ); else if ( Options::get()->getMILPSolverBoundTighteningType() == MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX ) lpFormulator.optimizeBoundsWithPreimageApproximation( _layerIndexToLayer ); From 3b2fe08c9c66db77d3a7abefe1d30f33acddeeb4 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Mon, 3 Feb 2025 13:33:31 +0200 Subject: [PATCH 52/81] Add files via upload --- src/nlr/tests/Test_NetworkLevelReasoner.h | 1 - 1 file changed, 1 deletion(-) diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h index b23deca8cf..1e9fc8eb92 100644 --- a/src/nlr/tests/Test_NetworkLevelReasoner.h +++ b/src/nlr/tests/Test_NetworkLevelReasoner.h @@ -13454,7 +13454,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite void updateTableau( MockTableau &tableau, List<Tightening> &tightenings ) { - ASSERT( tableau ); for ( const auto &tightening : tightenings ) { if ( tightening._type == Tightening::LB ) From 20c35774d7e31898e06c6e652a531e59c0ff54c1 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Mon, 3 Feb 2025 13:36:04 +0200 Subject: [PATCH 53/81] Add files via upload --- src/engine/Engine.cpp | 13 ++++++++++--- src/engine/MILPSolverBoundTighteningType.h | 4 +--- 2
files changed, 11 insertions(+), 6 deletions(-) diff --git a/src/engine/Engine.cpp b/src/engine/Engine.cpp index e137c3cbb9..332e95f70e 100644 --- a/src/engine/Engine.cpp +++ b/src/engine/Engine.cpp @@ -1594,7 +1594,6 @@ void Engine::performMILPSolverBoundedTightening( Query *inputQuery ) case MILPSolverBoundTighteningType::LP_RELAXATION_INCREMENTAL: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE: - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX: _networkLevelReasoner->lpRelaxationPropagation(); break; @@ -1660,6 +1659,15 @@ void Engine::performAdditionalBackwardAnalysisIfNeeded() printf( "Backward analysis tightened %u bounds\n", tightened ); } } + + if ( _milpSolverBoundTighteningType == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX ) + { + performMILPSolverBoundedTightening( &( *_preprocessedQuery ) ); + unsigned tightened = performSymbolicBoundTightening( &( *_preprocessedQuery ) ); + if ( _verbosity > 0 ) + printf( "Backward analysis tightened %u bounds\n", tightened ); + } } void Engine::performMILPSolverBoundedTighteningForSingleLayer( unsigned targetIndex ) @@ -1687,7 +1695,6 @@ void Engine::performMILPSolverBoundedTighteningForSingleLayer( unsigned targetIn return; case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE: - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX: case MILPSolverBoundTighteningType::ITERATIVE_PROPAGATION: case MILPSolverBoundTighteningType::NONE: @@ -3810,4 +3817,4 @@ void Engine::addPLCLemma( std::shared_ptr<PLCLemma> &explanation ) ASSERT( explanation && _UNSATCertificate && _UNSATCertificateCurrentPointer ) _statistics.incUnsignedAttribute( Statistics::NUM_LEMMAS ); _UNSATCertificateCurrentPointer->get()->addPLCLemma( explanation ); -} \ No newline at end of file +} diff --git a/src/engine/MILPSolverBoundTighteningType.h b/src/engine/MILPSolverBoundTighteningType.h index 760f82e8d1..517acf73e9 100644 --- a/src/engine/MILPSolverBoundTighteningType.h +++ b/src/engine/MILPSolverBoundTighteningType.h @@ -34,11 +34,9 @@ enum class MILPSolverBoundTighteningType { // Perform backward analysis BACKWARD_ANALYSIS_ONCE = 5, BACKWARD_ANALYSIS_CONVERGE = 6, - // Perform backward analysis using the INVPROP Algorithm (arXiv:2302.01404v4 [cs.LG]) - BACKWARD_ANALYSIS_INVPROP = 7, // Perform backward analysis using the PreimageApproximation Algorithm (arXiv:2305.03686v4 // [cs.SE]) - BACKWARD_ANALYSIS_PREIMAGE_APPROX = 8, + BACKWARD_ANALYSIS_PREIMAGE_APPROX = 7, // Option to have no MILP bound tightening performed NONE = 10, }; From 570eae8348cb947a7a1ccf3d5be6b6b9c2f0b0f2 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Mon, 3 Feb 2025 13:37:48 +0200 Subject: [PATCH 54/81] Add files via upload --- src/configuration/GlobalConfiguration.cpp | 1 + src/configuration/GlobalConfiguration.h | 3 +++ src/configuration/Options.cpp | 2 -- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/configuration/GlobalConfiguration.cpp b/src/configuration/GlobalConfiguration.cpp index a5d5100011..231c24f827 100644 --- a/src/configuration/GlobalConfiguration.cpp +++ b/src/configuration/GlobalConfiguration.cpp @@ -73,6 +73,7 @@ const unsigned GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS = 25000; const unsigned
GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS = 25; const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE = 0.025; const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE = 0.25; +const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_WEIGHT_DECAY = 0; const bool GlobalConfiguration::USE_HARRIS_RATIO_TEST = true; diff --git a/src/configuration/GlobalConfiguration.h b/src/configuration/GlobalConfiguration.h index 92dcfdd049..dd724819b3 100644 --- a/src/configuration/GlobalConfiguration.h +++ b/src/configuration/GlobalConfiguration.h @@ -169,6 +169,9 @@ class GlobalConfiguration // Learning rate for PreimageApproximation optimization. static const double PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE; + // Weight decay for PreimageApproximation optimization. + static const double PREIMAGE_APPROXIMATION_OPTIMIZATION_WEIGHT_DECAY; + // How often should projected steepest edge reset the reference space? static const unsigned PSE_ITERATIONS_BEFORE_RESET; diff --git a/src/configuration/Options.cpp b/src/configuration/Options.cpp index 17d570133f..ea74231bbe 100644 --- a/src/configuration/Options.cpp +++ b/src/configuration/Options.cpp @@ -207,8 +207,6 @@ MILPSolverBoundTighteningType Options::getMILPSolverBoundTighteningType() const return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE; if ( strategyString == "backward-converge" ) return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE; - if ( strategyString == "backward-invprop" ) - return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP; if ( strategyString == "backward-preimage-approx" ) return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX; else if ( strategyString == "milp" ) From c0bd87bc5f7932d8a87842ee2cd42abcccd85771 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 6 Feb 2025 01:11:37 +0200 Subject: [PATCH 55/81] Update GlobalConfiguration.cpp --- src/configuration/GlobalConfiguration.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/configuration/GlobalConfiguration.cpp b/src/configuration/GlobalConfiguration.cpp index 231c24f827..532286aa69 100644 --- a/src/configuration/GlobalConfiguration.cpp +++ b/src/configuration/GlobalConfiguration.cpp @@ -88,7 +88,7 @@ const bool GlobalConfiguration::PL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCES const bool GlobalConfiguration::NL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING = true; const double GlobalConfiguration::PREPROCESSOR_ALMOST_FIXED_THRESHOLD = 0.00001; -const unsigned GlobalConfiguration::PREPROCESSSING_MAX_TIGHTEING_ROUND = 10; +const unsigned GlobalConfiguration::PREPROCESSSING_MAX_TIGHTEING_ROUND = 1000; const bool GlobalConfiguration::WARM_START = false; From 838b5fbb89a7eb28c59ff1c2ddf9823ddfec5807 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 6 Feb 2025 19:34:10 +0200 Subject: [PATCH 56/81] Update CHANGELOG.md --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8ecc8e54f4..d687bbed57 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,8 @@ - Adding incremental infrastructure which allows pushing and popping constraints to/from the InputQuery. - Dropped support for parsing Tensorflow network format. Newest Marabou version that supports Tensorflow is at commit 190555573e4702. - Fixed bug in the parsing of `transpose` nodes in command line C++ parser. 
+ - Implemented forward-backward abstract interpretation, symbolic bound tightening, interval arithmetic and simulations for all activation functions. + - Implemented backward analysis using the preimage-approximation algorithm for `Relu`, `LeakyRelu`, `Sign` and `Bilinear` layers. ## Version 2.0.0 From 031fcc801bc833676f94acb196beb46ab944c4df Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 6 Feb 2025 19:34:53 +0200 Subject: [PATCH 57/81] Update OptionParser.cpp --- src/configuration/OptionParser.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/configuration/OptionParser.cpp b/src/configuration/OptionParser.cpp index cd94bbc394..fe50a9680f 100644 --- a/src/configuration/OptionParser.cpp +++ b/src/configuration/OptionParser.cpp @@ -267,7 +267,7 @@ void OptionParser::initialize() &( ( *_stringOptions )[Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE] ) ) ->default_value( ( *_stringOptions )[Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE] ), "The MILP solver bound tightening type: " - "lp/fb-once/fb-converge/lp-inc/milp/milp-inc/iter-prop/none." ) + "lp/backward-once/backward-converge/backward-preimage-approx/lp-inc/milp/milp-inc/iter-prop/none." ) #endif ; From 5f7a24fc10854e2b3309d67e37f093d1c13550ed Mon Sep 17 00:00:00 2001 From: idan0610 Date: Sat, 15 Feb 2025 09:30:56 +0200 Subject: [PATCH 58/81] small fix for a bug in SumOfInfeasibilitiesManager::proposePhasePatternUpdateRandomly() when the chosen plConstraintToUpdate is fixed --- src/engine/SumOfInfeasibilitiesManager.cpp | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/src/engine/SumOfInfeasibilitiesManager.cpp b/src/engine/SumOfInfeasibilitiesManager.cpp index 75f372f7f1..38ac64fcf2 100644 --- a/src/engine/SumOfInfeasibilitiesManager.cpp +++ b/src/engine/SumOfInfeasibilitiesManager.cpp @@ -195,8 +195,19 @@ void SumOfInfeasibilitiesManager::proposePhasePatternUpdateRandomly() } ); // First, pick a pl constraint whose cost component we will update. - unsigned index = (unsigned)T::rand() % _plConstraintsInCurrentPhasePattern.size(); - PiecewiseLinearConstraint *plConstraintToUpdate = _plConstraintsInCurrentPhasePattern[index]; + bool fixed = true; + PiecewiseLinearConstraint *plConstraintToUpdate; + while ( fixed ) + { + if ( _plConstraintsInCurrentPhasePattern.empty() ) + return; + + unsigned index = (unsigned)T::rand() % _plConstraintsInCurrentPhasePattern.size(); + plConstraintToUpdate = _plConstraintsInCurrentPhasePattern[index]; + fixed = plConstraintToUpdate->phaseFixed(); + if ( fixed ) + removeCostComponentFromHeuristicCost( plConstraintToUpdate ); + } // Next, pick an alternative phase.
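+ // At this point plConstraintToUpdate is guaranteed not to be phase-fixed: the loop above + // re-samples until it draws an unfixed constraint, removing the cost component of every + // fixed one it encounters, so proposing an alternative phase below is always meaningful.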
PhaseStatus currentPhase = _currentPhasePattern[plConstraintToUpdate]; From f1718d7d75364488c882be5fa11e11c74257fcae Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 20 Feb 2025 14:02:15 +0200 Subject: [PATCH 59/81] Add files via upload --- src/nlr/CMakeLists.txt | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/nlr/CMakeLists.txt b/src/nlr/CMakeLists.txt index f2c2161e65..e377a638ba 100644 --- a/src/nlr/CMakeLists.txt +++ b/src/nlr/CMakeLists.txt @@ -19,6 +19,11 @@ network_level_reasoner_add_unit_test(NetworkLevelReasoner) network_level_reasoner_add_unit_test(WsLayerElimination) network_level_reasoner_add_unit_test(ParallelSolver) +if (${ENABLE_GUROBI}) + network_level_reasoner_add_unit_test(LPRelaxation) +endif() + if (${BUILD_PYTHON}) target_include_directories(${MARABOU_PY} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}") endif() + From 8804b245d7019282f5ed587270c821594dfbb1bd Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 20 Feb 2025 14:04:46 +0200 Subject: [PATCH 60/81] Add files via upload --- src/nlr/LPFormulator.cpp | 43 +--------------------------------------- src/nlr/Layer.cpp | 43 ---------------------------------------- 2 files changed, 1 insertion(+), 85 deletions(-) diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp index 9aad8404f4..c6a76ece4a 100644 --- a/src/nlr/LPFormulator.cpp +++ b/src/nlr/LPFormulator.cpp @@ -876,10 +876,6 @@ void LPFormulator::addReluLayerToLpRelaxation( GurobiWrapper &gurobi, } } -<<<<<<< HEAD -======= - ->>>>>>> upstream/master void LPFormulator::addRoundLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -944,10 +940,6 @@ void LPFormulator::addRoundLayerToLpRelaxation( GurobiWrapper &gurobi, } } -<<<<<<< HEAD -======= - ->>>>>>> upstream/master void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -1012,17 +1004,9 @@ void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, double ub = std::min( std::max( -sourceLb, sourceUb ), layer->getUb( i ) ); double lb = std::max( 0.0, layer->getLb( i ) ); gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); -<<<<<<< HEAD - /* - The phase of this AbsoluteValue is not yet fixed, 0 <= y <= max(-lb, ub). - */ -======= + // The phase of this AbsoluteValue is not yet fixed, 0 <= y <= max(-lb, ub). - /* - The phase of this AbsoluteValue is not yet fixed, 0 <= y <= max(-lb, ub). 
- */ ->>>>>>> upstream/master // y >= 0 List<GurobiWrapper::Term> terms; terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); @@ -1037,10 +1021,6 @@ void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, } } -<<<<<<< HEAD -======= - ->>>>>>> upstream/master void LPFormulator::addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -1073,10 +1053,6 @@ void LPFormulator::addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, if ( createVariables && !gurobi.containsVariable( sourceName ) ) gurobi.addVariable( sourceName, sourceLb, sourceUb ); -<<<<<<< HEAD -======= - ->>>>>>> upstream/master double sourceUbSigmoid = SigmoidConstraint::sigmoid( sourceUb ); double sourceLbSigmoid = SigmoidConstraint::sigmoid( sourceLb ); @@ -1150,10 +1126,6 @@ void LPFormulator::addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, } } -<<<<<<< HEAD -======= - ->>>>>>> upstream/master void LPFormulator::addSignLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -1318,10 +1290,6 @@ void LPFormulator::addMaxLayerToLpRelaxation( GurobiWrapper &gurobi, } } -<<<<<<< HEAD -======= - ->>>>>>> upstream/master void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -1384,11 +1352,6 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, double bias; SoftmaxBoundType boundType = Options::get()->getSoftmaxBoundType(); - -<<<<<<< HEAD -======= - ->>>>>>> upstream/master List<GurobiWrapper::Term> terms; if ( FloatUtils::areEqual( lb, ub ) ) { @@ -1603,10 +1566,6 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, } } -<<<<<<< HEAD -======= - ->>>>>>> upstream/master void LPFormulator::addWeightedSumLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index ccd0922748..2341c07596 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -1063,10 +1063,6 @@ void Layer::computeIntervalArithmeticBoundsForSigmoid() } } -<<<<<<< HEAD -======= - ->>>>>>> upstream/master void Layer::computeIntervalArithmeticBoundsForRound() { for ( unsigned i = 0; i < _size; ++i ) @@ -1099,10 +1095,6 @@ void Layer::computeIntervalArithmeticBoundsForRound() } } -<<<<<<< HEAD -======= - ->>>>>>> upstream/master void Layer::computeIntervalArithmeticBoundsForMax() { for ( unsigned i = 0; i < _size; ++i ) @@ -2054,29 +2046,17 @@ void Layer::computeSymbolicBoundsForLeakyRelu() // Symbolic upper bound: x_f <= (x_b - l) * u / ( u - l) // Concrete upper bound: x_f <= ub_b double width = sourceUb - sourceLb; -<<<<<<< HEAD double weight = ( sourceUb - _alpha * sourceLb ) / width; -======= - double coeff = ( sourceUb - _alpha * sourceLb ) / width; ->>>>>>> upstream/master if ( _alpha <= 1 ) { for ( unsigned j = 0; j < _inputLayerSize; ++j ) { -<<<<<<< HEAD _symbolicUb[j * _size + i] *= weight; } // Do the same for the bias, and then adjust _symbolicUpperBias[i] *= weight; -======= - _symbolicUb[j * _size + i] *= coeff; - } - - // Do the same for the bias, and then adjust - _symbolicUpperBias[i] *= coeff; ->>>>>>> upstream/master _symbolicUpperBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; @@ -2111,19 +2091,11 @@ void Layer::computeSymbolicBoundsForLeakyRelu() { for ( unsigned j = 0; j < _inputLayerSize; ++j ) { -<<<<<<< HEAD _symbolicLb[j * _size + i] *= weight; } // Do the same for the bias, and then adjust _symbolicLowerBias[i] *= weight; -======= - _symbolicLb[j * _size
+ i] *= coeff; - } - - // Do the same for the bias, and then adjust - _symbolicLowerBias[i] *= coeff; ->>>>>>> upstream/master _symbolicLowerBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; if ( sourceUb > sourceLb ) @@ -2236,10 +2208,6 @@ } } -<<<<<<< HEAD -======= - ->>>>>>> upstream/master void Layer::computeSymbolicBoundsForSigmoid() { std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); @@ -3156,11 +3124,7 @@ void Layer::computeSymbolicBoundsForBilinear() } else { -<<<<<<< HEAD _symbolicUb[j * _size + i] += -======= - _symbolicLb[j * _size + i] += ->>>>>>> upstream/master sourceUbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA]; } @@ -3430,7 +3394,6 @@ void Layer::computeSymbolicBoundsForWeightedSum() } } -<<<<<<< HEAD void Layer::computeParameterisedSymbolicBounds( std::vector<double> coeffs, bool receivePolygonal, bool receive ) @@ -4596,8 +4559,6 @@ Layer::getParametersForLayers( const Map<unsigned, Layer *> &layers, std::vector<double> return layerIndicesToParameters; } -======= ->>>>>>> upstream/master double Layer::softmaxLSELowerBound( const Vector<double> &inputs, const Vector<double> &inputLbs, const Vector<double> &inputUbs, @@ -4877,10 +4838,6 @@ double Layer::softmaxdERUpperBound( const Vector<double> &inputMids, double li = outputLb[i]; double ui = outputUb[i]; -<<<<<<< HEAD -======= - ->>>>>>> upstream/master if ( i == di ) { double val2 = -1; From 394fe99453b69252fc1dfb06cf29c4646749ed43 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 20 Feb 2025 14:05:46 +0200 Subject: [PATCH 61/81] Add files via upload --- src/nlr/tests/Test_LPRelaxation.h | 5792 +++++++++++++++++++++ src/nlr/tests/Test_NetworkLevelReasoner.h | 3753 +------------ 2 files changed, 5861 insertions(+), 3684 deletions(-) create mode 100644 src/nlr/tests/Test_LPRelaxation.h diff --git a/src/nlr/tests/Test_LPRelaxation.h b/src/nlr/tests/Test_LPRelaxation.h new file mode 100644 index 0000000000..9b58650b2d --- /dev/null +++ b/src/nlr/tests/Test_LPRelaxation.h @@ -0,0 +1,5792 @@ +/********************* */ +/*! \file Test_LPRelaxation.h + ** \verbatim + ** Top contributors (to current version): + ** Guy Katz, Andrew Wu + ** This file is part of the Marabou project. + ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS + ** in the top-level source directory) and their institutional affiliations. + ** All rights reserved. See the file COPYING in the top-level source + ** directory for licensing information.\endverbatim + ** + ** [[ Add lengthier description here ]] + +**/ + +#include "../../engine/tests/MockTableau.h" // TODO: fix this +#include "DeepPolySoftmaxElement.h" +#include "FloatUtils.h" +#include "Layer.h" +#include "NetworkLevelReasoner.h" +#include "Options.h" +#include "PolygonalTightening.h" +#include "Query.h" +#include "Tightening.h" +#include "Vector.h" + +#include <cxxtest/TestSuite.h> + +class MockForNetworkLevelReasoner +{ +public: +}; + +class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite +{ +public: + MockForNetworkLevelReasoner *mock; + + void setUp() + { + TS_ASSERT( mock = new MockForNetworkLevelReasoner ); + } + + void tearDown() + { + TS_ASSERT_THROWS_NOTHING( delete mock ); + } + + void populateNetworkBackwardReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 R -1 R -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ R / \ R / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig.
2 of + https://dl.acm.org/doi/10.1145/3563325 + using ReLU activation instead of LeakyReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + nlr.getLayer( 4 )->setAlpha( 0.1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardReLU2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = ReLU( x3 ) + x8 = ReLU( x4 ) + x9 = ReLU( x5 ) + x10 = ReLU( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = ReLU( x11 ) + x15 = ReLU( x12 ) + x16 = ReLU( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = ReLU( x17 ) + x20 = ReLU( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 
); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::RELU, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::RELU, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + 
tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardSigmoid( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 S -1 S -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ S / \ S / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 2 of + https://dl.acm.org/doi/10.1145/3563325 + using Sigmoid activation instead of LeakyReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the Sigmoid sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + 
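+ // (all non-input neurons start at +/- 1000000, so any tightening observed by the tests + // must come from the bound-propagation machinery itself rather than from these initial values)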
double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardSigmoid2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = Sigmoid( x3 ) + x8 = Sigmoid( x4 ) + x9 = Sigmoid( x5 ) + x10 = Sigmoid( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = Sigmoid( x11 ) + x15 = Sigmoid( x12 ) + x16 = Sigmoid( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = Sigmoid( x17 ) + x20 = Sigmoid( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::SIGMOID, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::SIGMOID, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the Sigmoid sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + 
nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardSign( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 Sign -1 Sign -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ Sign / \ Sign / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 
2 of +           https://dl.acm.org/doi/10.1145/3563325 +           using Sign activation instead of LeakyReLU +         */ + +        // Create the layers +        nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); +        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); +        nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); +        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); +        nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); +        nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + +        // Mark layer dependencies +        for ( unsigned i = 1; i <= 5; ++i ) +            nlr.addLayerDependency( i - 1, i ); + +        // Set the weights and biases for the weighted sum layers +        nlr.setWeight( 0, 0, 1, 0, 1 ); +        nlr.setWeight( 0, 0, 1, 1, 1 ); +        nlr.setWeight( 0, 1, 1, 0, -1 ); +        nlr.setWeight( 0, 1, 1, 1, 1 ); + +        nlr.setWeight( 2, 0, 3, 0, -1 ); +        nlr.setWeight( 2, 0, 3, 1, 2 ); +        nlr.setWeight( 2, 1, 3, 0, 1 ); +        nlr.setWeight( 2, 1, 3, 1, -1 ); + +        nlr.setWeight( 4, 0, 5, 0, -1 ); +        nlr.setWeight( 4, 0, 5, 1, 1 ); +        nlr.setWeight( 4, 1, 5, 0, -1 ); +        nlr.setWeight( 4, 1, 5, 1, 2 ); + +        nlr.setBias( 5, 1, 2 ); + +        // Mark the Sign sources +        nlr.addActivationSource( 1, 0, 2, 0 ); +        nlr.addActivationSource( 1, 1, 2, 1 ); + +        nlr.addActivationSource( 3, 0, 4, 0 ); +        nlr.addActivationSource( 3, 1, 4, 1 ); + +        // Variable indexing +        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); +        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + +        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); +        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + +        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); +        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + +        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); +        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + +        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); +        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + +        nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); +        nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + +        // Very loose bounds for neurons except inputs +        double large = 1000000; + +        tableau.getBoundManager().initialize( 12 ); +        tableau.setLowerBound( 2, -large ); +        tableau.setUpperBound( 2, large ); +        tableau.setLowerBound( 3, -large ); +        tableau.setUpperBound( 3, large ); +        tableau.setLowerBound( 4, -large ); +        tableau.setUpperBound( 4, large ); +        tableau.setLowerBound( 5, -large ); +        tableau.setUpperBound( 5, large ); +        tableau.setLowerBound( 6, -large ); +        tableau.setUpperBound( 6, large ); +        tableau.setLowerBound( 7, -large ); +        tableau.setUpperBound( 7, large ); +        tableau.setLowerBound( 8, -large ); +        tableau.setUpperBound( 8, large ); +        tableau.setLowerBound( 9, -large ); +        tableau.setUpperBound( 9, large ); +        tableau.setLowerBound( 10, -large ); +        tableau.setUpperBound( 10, large ); +        tableau.setLowerBound( 11, -large ); +        tableau.setUpperBound( 11, large ); +    } + +    void populateNetworkBackwardSign2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) +    { +        /* +                x3    x7 +           x0                 x11    x14 +                x4    x8               x17    x19 +           x1                 x12    x15           x21 +                x5    x9               x18    x20 +           x2                 x13    x16 +                x6    x10 + +           x3 = -x0 + x1 +           x4 = x0 + 2*x1 +           x5 = x0 + x1 + x2 +           x6 = 3*x0 - 2*x1 - x2 + +           x7 = Sign( x3 ) +           x8 = Sign( x4 ) +           x9 = Sign( x5 ) +           x10 = Sign( x6 ) + +           x11 = 2x7 + x9 - x10 + 2 +           x12 = -x7 + 2x8 + x10 +           x13 = x7 - x8 + 2x9 - 2 + +           x14 = Sign( x11 ) +           x15 = Sign( x12 ) +           x16 = Sign( x13 ) + +           x17 = -x14 + x15 - x16 +           x18 = x14 + x15 + x16 + +           x19 = Sign( x17 ) +           x20 = Sign( x18 ) + +           x21 = x19 - x20 - 1 +        */ + +        // Create the layers +        nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); +        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); +        nlr.addLayer( 2, 
NLR::Layer::SIGN, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::SIGN, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + 
tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardRound( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 Rnd -1 Rnd -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ Rnd / \ Rnd / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 2 of + https://dl.acm.org/doi/10.1145/3563325 + using Round activation instead of LeakyReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ROUND, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the Round sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + 
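+        // ( initialize( 12 ) sizes the bound manager to the network's total variable count; variables 2-11 receive the loose +/- 1e6 box below, while the inputs are bounded by each test. )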
tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardRound2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = Round( x3 ) + x8 = Round( x4 ) + x9 = Round( x5 ) + x10 = Round( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = Round( x11 ) + x15 = Round( x12 ) + x16 = Round( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = Round( x17 ) + x20 = Round( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::ROUND, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::ROUND, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::ROUND, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the Round sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); 
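+        // ( With this indexing, a quick sanity check of the equations above: at x0 = x1 = x2 = 0 every first-layer sum is 0, so x7..x10 = Round( 0 ) = 0; then x11 = 2, x12 = 0, x13 = -2, giving x14..x16 = 2, 0, -2, hence x17 = x18 = 0 and x21 = 0 - 0 - 1 = -1. )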
+ + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardAbs( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 A -1 A -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ A / \ A / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 
2 of + https://dl.acm.org/doi/10.1145/3563325 + using Absolute value activation instead of LeakyReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardAbs2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = Abs( x3 ) + x8 = Abs( x4 ) + x9 = Abs( x5 ) + x10 = Abs( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = Abs( x11 ) + x15 = Abs( x12 ) + x16 = Abs( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = Abs( x17 ) + x20 = Abs( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + 
nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, 
-large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardLeakyReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 LR -1 LR -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ LR / \ LR / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 2 of + https://dl.acm.org/doi/10.1145/3563325 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + nlr.getLayer( 4 )->setAlpha( 0.1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the LeakyReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for 
neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardLeakyRelu2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = LeakyReLU( x3 ) + x8 = LeakyReLU( x4 ) + x9 = LeakyReLU( x5 ) + x10 = LeakyReLU( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = LeakyReLU( x11 ) + x15 = LeakyReLU( x12 ) + x16 = LeakyReLU( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = LeakyReLU( x17 ) + x20 = LeakyReLU( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + nlr.getLayer( 4 )->setAlpha( 0.1 ); + nlr.getLayer( 6 )->setAlpha( 0.1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the LeakyReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + 
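+        // ( As used here, addActivationSource( srcLayer, srcNeuron, actLayer, actNeuron ) pairs an activation neuron with the weighted-sum neuron it reads; element-wise activations such as LeakyReLU take exactly one source per neuron. )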
nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) + { + /* + a a' + x e + b b' f h + y d + c c' + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::MAX, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the 
weighted sum layers +        nlr.setWeight( 0, 0, 1, 0, 1 ); +        nlr.setWeight( 0, 0, 1, 1, 2 ); +        nlr.setWeight( 0, 1, 1, 1, -3 ); +        nlr.setWeight( 0, 1, 1, 2, 1 ); + +        nlr.setWeight( 2, 0, 3, 0, 1 ); +        nlr.setWeight( 2, 0, 3, 1, -1 ); +        nlr.setWeight( 2, 1, 3, 0, 1 ); +        nlr.setWeight( 2, 1, 3, 1, 1 ); +        nlr.setWeight( 2, 2, 3, 0, -1 ); +        nlr.setWeight( 2, 2, 3, 1, -1 ); + +        nlr.setWeight( 4, 0, 5, 0, -1 ); + +        nlr.setBias( 1, 0, 1 ); +        nlr.setBias( 3, 1, 2 ); + +        // Mark the Softmax/Max sources +        nlr.addActivationSource( 1, 0, 2, 0 ); +        nlr.addActivationSource( 1, 0, 2, 1 ); +        nlr.addActivationSource( 1, 0, 2, 2 ); +        nlr.addActivationSource( 1, 1, 2, 0 ); +        nlr.addActivationSource( 1, 1, 2, 1 ); +        nlr.addActivationSource( 1, 1, 2, 2 ); +        nlr.addActivationSource( 1, 2, 2, 0 ); +        nlr.addActivationSource( 1, 2, 2, 1 ); +        nlr.addActivationSource( 1, 2, 2, 2 ); + +        nlr.addActivationSource( 3, 0, 4, 0 ); +        nlr.addActivationSource( 3, 1, 4, 0 ); + +        // Variable indexing +        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); +        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + +        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); +        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); +        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + +        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); +        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); +        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + +        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); +        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + +        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + +        nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + +        // Very loose bounds for neurons except inputs +        double large = 1000000; + +        tableau.getBoundManager().initialize( 12 ); +        tableau.setLowerBound( 2, -large ); +        tableau.setUpperBound( 2, large ); +        tableau.setLowerBound( 3, -large ); +        tableau.setUpperBound( 3, large ); +        tableau.setLowerBound( 4, -large ); +        tableau.setUpperBound( 4, large ); +        tableau.setLowerBound( 5, -large ); +        tableau.setUpperBound( 5, large ); +        tableau.setLowerBound( 6, -large ); +        tableau.setUpperBound( 6, large ); +        tableau.setLowerBound( 7, -large ); +        tableau.setUpperBound( 7, large ); +        tableau.setLowerBound( 8, -large ); +        tableau.setUpperBound( 8, large ); +        tableau.setLowerBound( 9, -large ); +        tableau.setUpperBound( 9, large ); +        tableau.setLowerBound( 10, -large ); +        tableau.setUpperBound( 10, large ); +        tableau.setLowerBound( 11, -large ); +        tableau.setUpperBound( 11, large ); +    } + + +    void populateNetworkBackwardSoftmaxAndMax2( NLR::NetworkLevelReasoner &nlr, +                                                MockTableau &tableau ) +    { +        /* +                x3    x7 +           x0                 x11    x14 +                x4    x8 +           x1                 x12    x15    x17 +                x5    x9 +           x2                 x13    x16 +                x6    x10 + +           x3 = -x0 + x1 +           x4 = x0 + 2*x1 +           x5 = x0 + x1 + x2 +           x6 = 3*x0 - 2*x1 - x2 + +           x7, x8, x9, x10 = Softmax( x3, x4, x5, x6 ) + +           x11 = 2x7 + x9 - x10 + 2 +           x12 = -x7 + 2x8 + x10 +           x13 = x7 - x8 + 2x9 - 2 + +           x14, x15, x16 = Softmax( x11, x12, x13 ) + +           x17 = Max( x14, x15, x16 ) +        */ + +        // Create the layers +        nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); +        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); +        nlr.addLayer( 2, NLR::Layer::SOFTMAX, 4 ); +        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); +        nlr.addLayer( 4, NLR::Layer::SOFTMAX, 3 ); +        nlr.addLayer( 5, NLR::Layer::MAX, 1 ); + +        // Mark layer dependencies +        for ( unsigned i = 1; i <= 5; ++i ) +            nlr.addLayerDependency( i - 1, i ); + +        // Set the weights and biases for the weighted sum layers +        nlr.setWeight( 0, 0, 1, 0, -1 ); +        nlr.setWeight( 0, 0, 1, 1, 1 ); 
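+        // ( As used here, setWeight( srcLayer, srcNeuron, tgtLayer, tgtNeuron, w ) adds w * source to the target's weighted sum; e.g. the first call above contributes the -x0 term of x3 in the comment. )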
+ nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + + // Mark the Softmax/Max sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 3, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 3, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 2 ); + nlr.addActivationSource( 1, 0, 2, 3 ); + nlr.addActivationSource( 1, 1, 2, 3 ); + nlr.addActivationSource( 1, 2, 2, 3 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + nlr.addActivationSource( 3, 2, 4, 0 ); + nlr.addActivationSource( 3, 0, 4, 1 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 1 ); + nlr.addActivationSource( 3, 0, 4, 2 ); + nlr.addActivationSource( 3, 1, 4, 2 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 4, 0, 5, 0 ); + nlr.addActivationSource( 4, 1, 5, 0 ); + nlr.addActivationSource( 4, 2, 5, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 18 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 
10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + } + + void populateNetworkBackwardReluAndBilinear( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) + { + /* + a a' + x e + b b' f h + y d + c c' + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the ReLU/Bilinear sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardReluAndBilinear2( 
NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) + { + /* + x3 x7 + x0 + x4 x8 x11 x13 + x1 x15 + x5 x9 x12 x14 + x2 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = ReLU( x3 ) + x8 = ReLU( x4 ) + x9 = ReLU( x5 ) + x10 = ReLU( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + + x13 = ReLU( x11 ) + x14 = ReLU( x12 ) + + x15 = x13 * x14 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::RELU, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::BILINEAR, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setBias( 3, 0, 2 ); + + // Mark the ReLU/Bilinear sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + nlr.addActivationSource( 4, 0, 5, 0 ); + nlr.addActivationSource( 4, 1, 5, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 13 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 14 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 15 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 16 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 
11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + } + + void test_backwards_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ), + Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 8, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 
3, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ), + + Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 8, 0, Tightening::LB ), + Tightening( 9, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_relu2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ), + + Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ), + Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ), + + Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ), + + Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ), + + Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( 
boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + + Tightening( 19, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ), + Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ), + Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, 
Tightening::UB ), + + Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ), + Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ), + Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ), + + Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + + Tightening( 20, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_sigmoid() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSigmoid( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0.2689, Tightening::LB ), Tightening( 4, 0.7311, Tightening::UB ), + Tightening( 5, 0.5, Tightening::LB ), Tightening( 5, 0.8808, Tightening::UB ), + + Tightening( 6, -0.1261, Tightening::LB ), Tightening( 6, 0.5069, Tightening::UB ), + Tightening( 7, -0.2379, Tightening::LB ), Tightening( 7, 0.8571, Tightening::UB ), + + Tightening( 8, 0.4685, Tightening::LB ), Tightening( 8, 0.6241, Tightening::UB ), + Tightening( 9, 0.4408, Tightening::LB ), Tightening( 9, 0.7021, Tightening::UB ), + + Tightening( 10, -1.1819, Tightening::LB ), Tightening( 10, -1.0535, Tightening::UB ), + Tightening( 11, 3.4986, Tightening::LB ), Tightening( 11, 3.8797, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + 
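// ( variables 4 through 11 are reset the same way below: widening every + // non-input variable to [ -large, large ] ensures that tightenings from + // the previous query do not constrain the new propagation ) + 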
tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0.0066, Tightening::LB ), Tightening( 4, 0.8807, Tightening::UB ), + Tightening( 5, 0.0179, Tightening::LB ), Tightening( 5, 0.9526, Tightening::UB ), + + Tightening( 6, -0.8362, Tightening::LB ), Tightening( 6, 0.9193, Tightening::UB ), + Tightening( 7, -0.8860, Tightening::LB ), Tightening( 7, 1.6904, Tightening::UB ), + + Tightening( 8, 0.3023, Tightening::LB ), Tightening( 8, 0.7148, Tightening::UB ), + Tightening( 9, 0.2919, Tightening::LB ), Tightening( 9, 0.8443, Tightening::UB ), + + Tightening( 10, -1.2694, Tightening::LB ), Tightening( 10, -0.8841, Tightening::UB ), + Tightening( 11, 3.2396, Tightening::LB ), Tightening( 11, 4.05, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_sigmoid2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSigmoid2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), + Tightening( 8, 0.0474, Tightening::LB ), Tightening( 8, 0.9526, Tightening::UB ), + Tightening( 9, 0.0474, Tightening::LB ), Tightening( 9, 0.9526, Tightening::UB ), + Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9975, Tightening::UB ), + + Tightening( 11, 1.3787, Tightening::LB ), Tightening( 11, 
4.6213, Tightening::UB ), + Tightening( 12, -0.5636, Tightening::LB ), Tightening( 12, 2.5636, Tightening::UB ), + Tightening( 13, -2.3771, Tightening::LB ), Tightening( 13, 0.3771, Tightening::UB ), + + Tightening( 14, 0.7988, Tightening::LB ), Tightening( 14, 0.9903, Tightening::UB ), + Tightening( 15, 0.3627, Tightening::LB ), Tightening( 15, 0.9285, Tightening::UB ), + Tightening( 16, 0.0849, Tightening::LB ), Tightening( 16, 0.5932, Tightening::UB ), + + Tightening( 17, -1.2113, Tightening::LB ), Tightening( 17, 0.0354, Tightening::UB ), + Tightening( 18, 1.4027, Tightening::LB ), Tightening( 18, 2.4177, Tightening::UB ), + + Tightening( 19, 0.2295, Tightening::LB ), Tightening( 19, 0.5088, Tightening::UB ), + Tightening( 20, 0.8026, Tightening::LB ), Tightening( 20, 0.9182, Tightening::UB ), + + Tightening( 21, -1.6539, Tightening::LB ), Tightening( 21, -1.3393, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, 
Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.9933, Tightening::UB ), + Tightening( 8, 0.0067, Tightening::LB ), Tightening( 8, 0.9933, Tightening::UB ), + Tightening( 9, 0.0025, Tightening::LB ), Tightening( 9, 0.9933, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9991, Tightening::UB ), + + Tightening( 11, 1.2517, Tightening::LB ), Tightening( 11, 4.9701, Tightening::UB ), + Tightening( 12, -0.9599, Tightening::LB ), Tightening( 12, 2.8466, Tightening::UB ), + Tightening( 13, -2.8147, Tightening::LB ), Tightening( 13, 0.9188, Tightening::UB ), + + Tightening( 14, 0.7776, Tightening::LB ), Tightening( 14, 0.9931, Tightening::UB ), + Tightening( 15, 0.2769, Tightening::LB ), Tightening( 15, 0.9451, Tightening::UB ), + Tightening( 16, 0.0565, Tightening::LB ), Tightening( 16, 0.7148, Tightening::UB ), + + Tightening( 17, -1.4307, Tightening::LB ), Tightening( 17, 0.1083, Tightening::UB ), + Tightening( 18, 1.2592, Tightening::LB ), Tightening( 18, 2.5519, Tightening::UB ), + + Tightening( 19, 0.1929, Tightening::LB ), Tightening( 19, 0.5270, Tightening::UB ), + Tightening( 20, 0.7789, Tightening::LB ), Tightening( 20, 0.9277, Tightening::UB ), + + Tightening( 21, -1.6967, Tightening::LB ), Tightening( 21, -1.3115, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_abs() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardAbs( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, -4, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 2, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( 
nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 4, Tightening::UB ), + + Tightening( 6, -5, Tightening::LB ), Tightening( 6, 4, Tightening::UB ), + Tightening( 7, -4, Tightening::LB ), Tightening( 7, 10, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 10, Tightening::UB ), + + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 2, Tightening::LB ), Tightening( 11, 27, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_abs2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardAbs2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), 
Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 9, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), Tightening( 14, 9, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), + Tightening( 16, 0, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), + + Tightening( 17, -15, Tightening::LB ), Tightening( 17, 12, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 27, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 15, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), + + Tightening( 21, -28, Tightening::LB ), Tightening( 21, 14, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + 
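// A note on the expected bounds below: for an absolute-value neuron whose + // source ranges over [ l, u ] with l < 0 < u, interval propagation yields + // output bounds [ 0, max( -l, u ) ]. For example, variable 3 in [ -2, 5 ] + // gives variable 7 in [ 0, 5 ], and variable 6 in [ -15, 7 ] gives + // variable 10 in [ 0, 15 ]. + 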
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 15, Tightening::UB ), + + Tightening( 11, -13, Tightening::LB ), Tightening( 11, 18, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 25, Tightening::UB ), + Tightening( 13, -7, Tightening::LB ), Tightening( 13, 15, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), Tightening( 14, 18, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), Tightening( 15, 25, Tightening::UB ), + Tightening( 16, 0, Tightening::LB ), Tightening( 16, 15, Tightening::UB ), + + Tightening( 17, -33, Tightening::LB ), Tightening( 17, 25, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 58, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 58, Tightening::UB ), + + Tightening( 21, -59, Tightening::LB ), Tightening( 21, 32, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_round() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardRound( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -4, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 6.5, Tightening::UB ), + } ); + + List 
bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 11, -4, Tightening::LB ), + Tightening( 11, 6, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -3, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, -10.5, Tightening::LB ), Tightening( 7, 5.5, Tightening::UB ), + + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -10, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + + Tightening( 10, -3, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + Tightening( 11, -15.5, Tightening::LB ), Tightening( 11, 11.5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, -10, Tightening::LB ), + Tightening( 7, 5, Tightening::UB ), + + Tightening( 11, -14, Tightening::LB ), + Tightening( 11, 11, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_round2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardRound2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 
); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 15, Tightening::UB ), + Tightening( 12, -10, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -7, Tightening::LB ), Tightening( 13, 3, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 15, Tightening::UB ), + Tightening( 15, -10, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -7, Tightening::LB ), Tightening( 16, 3, Tightening::UB ), + + Tightening( 17, -27.5, Tightening::LB ), Tightening( 17, 27.5, Tightening::UB ), + Tightening( 18, -16.5, Tightening::LB ), Tightening( 18, 16.5, Tightening::UB ), + + Tightening( 19, -28, Tightening::LB ), Tightening( 19, 28, Tightening::UB ), + Tightening( 20, -17, Tightening::LB ), Tightening( 20, 17, Tightening::UB ), + + Tightening( 21, -38, Tightening::LB ), Tightening( 21, 36, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, 
-large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -13, Tightening::LB ), Tightening( 11, 30, Tightening::UB ), + Tightening( 12, -23, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), + + Tightening( 14, -13, Tightening::LB ), Tightening( 14, 30, Tightening::UB ), + Tightening( 15, -23, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), + Tightening( 16, -9, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), + + Tightening( 17, -57.5, Tightening::LB ), Tightening( 17, 32.5, Tightening::UB ), + Tightening( 18, -23.5, Tightening::LB ), Tightening( 18, 26.5, Tightening::UB ), + + Tightening( 19, -58, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), + Tightening( 20, -24, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), + + Tightening( 21, -74, Tightening::LB ), Tightening( 21, 44, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_sign() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB 
), + Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_sign2() + { + Options::get()->setString( 
Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + 
tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_leaky_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + 
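// ( the LP-phase tightenings checked further below, e.g. variable 4's + // lower bound moving from -1 to -0.1, are consistent with a leaky ReLU + // slope of 0.1, assuming that is the slope configured in + // populateNetworkBackwardLeakyReLU: a source in [ l, u ] with l < 0 + // bounds the output below by 0.1 * l ) + 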
TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 4, -0.1, Tightening::LB ), + + Tightening( 8, -0.045, Tightening::LB ), + Tightening( 9, -0.3, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), + Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + + Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), + Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + + Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), + Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( 
boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 4, -0.5, Tightening::LB ), + Tightening( 5, -0.4, Tightening::LB ), + + Tightening( 8, -0.4571, Tightening::LB ), + Tightening( 9, -1.1057, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_leaky_relu2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyRelu2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ), + Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ), + Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ), + Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ), + Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ), + + Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ), + Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ), + + Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ), + Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ), + + Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 7, -0.2, Tightening::LB ), + Tightening( 8, -0.3, Tightening::LB ), + Tightening( 9, -0.3, Tightening::LB ), + Tightening( 10, -0.6, Tightening::LB ), + + Tightening( 14, -0.9, Tightening::LB ), + Tightening( 15, -0.89, Tightening::LB ), + Tightening( 16, -0.77, Tightening::LB 
), + + Tightening( 19, -2.3133, Tightening::LB ), + Tightening( 20, -1.2, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ), + Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ), + Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ), + Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ), + Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ), + + Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ), + Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ), + + Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ), + Tightening( 20, -19, Tightening::LB ), Tightening( 20, 
38.5043, Tightening::UB ),
+
+            Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
+
+
+        // Invoke backward LP propagation
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds4( {
+            Tightening( 7, -0.2, Tightening::LB ),
+            Tightening( 8, -0.5, Tightening::LB ),
+            Tightening( 9, -0.6, Tightening::LB ),
+            Tightening( 10, -1.5, Tightening::LB ),
+
+            Tightening( 14, -1.1, Tightening::LB ),
+            Tightening( 15, -2.1771, Tightening::LB ),
+            Tightening( 16, -1.15, Tightening::LB ),
+
+            Tightening( 19, -5.6259, Tightening::LB ),
+            Tightening( 20, -1.9, Tightening::LB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+    }
+
+    void test_backwards_softmax_and_max()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+                                   "backward-converge" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkBackwardSoftmaxAndMax( nlr, tableau );
+
+        tableau.setLowerBound( 0, 0 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, 0 );
+        tableau.setUpperBound( 1, 1 );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
+            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
+
+            Tightening( 3, 0.2119, Tightening::LB ), Tightening( 3, 0.8756, Tightening::UB ),
+            Tightening( 5, 0.0049, Tightening::LB ), Tightening( 5, 0.6652, Tightening::UB ),
+            Tightening( 7, 0.0634, Tightening::LB ), Tightening( 7, 0.4955, Tightening::UB ),
+
+            Tightening( 8, -0.1870, Tightening::LB ), Tightening( 8, 1.4488, Tightening::UB ),
+            Tightening( 9, 0.6814, Tightening::LB ), Tightening( 9, 2.2432, Tightening::UB ),
+
+            Tightening( 10, 0.6814, Tightening::LB ), Tightening( 10, 2.2432, Tightening::UB ),
+
+            Tightening( 11, -2.2432, Tightening::LB ), Tightening( 11, -0.6814, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+
+        // Invoke backward LP propagation
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds2( {} );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+
+        // Change the current bounds
+        tableau.setLowerBound( 0, -3 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 2 );
+
+        double large = 1000000;
+        tableau.setLowerBound( 2, -large );
+        tableau.setUpperBound( 2, large );
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
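+        // (All intermediate variables are being reset to the sentinel
+        // interval [-large, large], i.e. effectively unbounded, so that the
+        // DeepPoly pass below recomputes their bounds from scratch.)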
tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + + Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 9, 0.2194, Tightening::LB ), Tightening( 9, 2.9934, Tightening::UB ), + + Tightening( 10, 0.2194, Tightening::LB ), Tightening( 10, 2.9934, Tightening::UB ), + + Tightening( 11, -2.9934, Tightening::LB ), Tightening( 11, -0.2194, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_softmax_and_max2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSoftmaxAndMax2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0.0003, Tightening::LB ), Tightening( 7, 0.9864, Tightening::UB ), + Tightening( 8, 0.0001, Tightening::LB ), Tightening( 8, 0.9907, Tightening::UB ), + Tightening( 9, 0.0001, Tightening::LB ), Tightening( 9, 0.9907, Tightening::UB ), + Tightening( 10, 0.0001, Tightening::LB ), Tightening( 10, 0.9994, Tightening::UB ), + + Tightening( 11, 1.0013, Tightening::LB ), Tightening( 11, 4.9634, 
Tightening::UB ), + Tightening( 12, -0.9861, Tightening::LB ), Tightening( 12, 2.9152, Tightening::UB ), + Tightening( 13, -2.9868, Tightening::LB ), Tightening( 13, 0.9678, Tightening::UB ), + + Tightening( 14, 0.1143, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ), + Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8694, Tightening::UB ), + Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4596, Tightening::UB ), + + Tightening( 17, 0.1143, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0.0001, Tightening::LB ), Tightening( 7, 0.9999, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 0.9991, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 0.9990, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9999, Tightening::UB ), + + Tightening( 11, 1.0003, Tightening::LB ), Tightening( 11, 4.9326, Tightening::UB ), + Tightening( 12, -0.9999, Tightening::LB ), Tightening( 12, 2.9979, Tightening::UB ), + Tightening( 13, -2.9990, Tightening::LB ), Tightening( 13, 0.9980, Tightening::UB ), + + Tightening( 14, 
0.1067, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ),
+            Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8786, Tightening::UB ),
+            Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4677, Tightening::UB ),
+
+            Tightening( 17, 0.1067, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
+
+
+        // Invoke backward LP propagation
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds4( {} );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+    }
+
+    void test_backwards_relu_and_bilinear()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+                                   "backward-converge" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkBackwardReluAndBilinear( nlr, tableau );
+
+        tableau.setLowerBound( 0, 0 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, 0 );
+        tableau.setUpperBound( 1, 1 );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
+            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
+
+            Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
+
+            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ),
+            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ),
+
+            Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ),
+
+            Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+
+        // Invoke backward LP propagation
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds2( {} );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+
+        // Change the current bounds
+        tableau.setLowerBound( 0, -3 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 2 );
+
+        double large = 1000000;
+        tableau.setLowerBound( 2, -large );
+        tableau.setUpperBound( 2, large );
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+        tableau.setLowerBound( 7, -large );
+        tableau.setUpperBound( 7, large );
+        tableau.setLowerBound( 8, -large );
+        tableau.setUpperBound( 8, large );
+        tableau.setLowerBound( 9, -large );
+        tableau.setUpperBound( 9, large );
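+        // (Same pattern as in the previous test: widen every non-input
+        // variable to [-large, large] before re-running the analysis.)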
tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), + Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), + + Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), + + Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_relu_and_bilinear2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReluAndBilinear2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ), + Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ), + + Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward 
LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ), + Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ), + + Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void 
test_preimage_approximation_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ), + Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 1.625, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -2, 
Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ), + + Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 11, 0.8472, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_relu2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ), + + Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ), + Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ), + + Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ), + + Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ), + + Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + 
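+        // The three calls in this block follow a fixed pattern: the
+        // updateTableau() helper (defined elsewhere in this suite)
+        // presumably commits the tightenings gathered so far to the mock
+        // tableau, obtainCurrentBounds() pulls the refreshed bounds back
+        // into the reasoner, and lpRelaxationPropagation() then runs the
+        // backward-preimage-approx pass on top of them.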
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 12, -1.75, Tightening::LB ), + Tightening( 13, -4.25, Tightening::LB ), + Tightening( 13, 3.25, Tightening::UB ), + + Tightening( 17, -11.1417, Tightening::LB ), + Tightening( 17, 10, Tightening::UB ), + + Tightening( 21, -17.3084, Tightening::LB ), + Tightening( 21, 3.2160, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ), + Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ), + 
Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ),
+
+            Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ),
+            Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ),
+
+            Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ),
+            Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ),
+
+            Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
+
+
+        // Invoke PreimageApproximation
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds4( {
+            Tightening( 11, -5, Tightening::LB ),
+            Tightening( 12, -4.6429, Tightening::LB ),
+            Tightening( 13, 8.5519, Tightening::UB ),
+
+            Tightening( 17, -23.6231, Tightening::LB ),
+            Tightening( 17, 14.0909, Tightening::UB ),
+            Tightening( 18, 2, Tightening::LB ),
+            Tightening( 18, 28.2015, Tightening::UB ),
+
+            Tightening( 21, -29.2015, Tightening::LB ),
+            Tightening( 21, 6.5734, Tightening::UB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
+        bounds = removeRedundancies( bounds, newBounds );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+    }
+
+    void test_preimage_approximation_leaky_relu()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+                                   "backward-preimage-approx" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkBackwardLeakyReLU( nlr, tableau );
+
+        tableau.setLowerBound( 0, 0 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, 0 );
+        tableau.setUpperBound( 1, 1 );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
+            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+
+            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+
+            Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
+            Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
+
+            Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ),
+            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
+
+            Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ),
+            Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds, newBounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+
+        // Invoke PreimageApproximation
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds2( {
+            Tightening( 7, -2, Tightening::LB ),
+
+            Tightening( 10, -1.8, Tightening::LB ),
+            Tightening( 10, 0, Tightening::UB ),
+
+            Tightening( 11, 1.4542, Tightening::LB
), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), + Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + + Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), + Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + + Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), + Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 6, -2, Tightening::LB ), + Tightening( 6, 3.1, Tightening::UB ), + Tightening( 7, -3.2, Tightening::LB ), + Tightening( 7, 4, Tightening::UB ), + + Tightening( 8, 3.1, Tightening::UB ), + Tightening( 9, -0.32, Tightening::LB ), + + Tightening( 10, -3.8726, Tightening::LB ), + Tightening( 10, 0.03, Tightening::UB ), + Tightening( 11, 0.4074, Tightening::LB ), + Tightening( 11, 11.3243, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_leaky_relu2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyRelu2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + 
tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ), + Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ), + Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ), + Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ), + Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ), + + Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ), + Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ), + + Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ), + Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ), + + Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 11, -4.5, Tightening::LB ), + Tightening( 11, 8.5, Tightening::UB ), + Tightening( 12, -2.225, Tightening::LB ), + Tightening( 13, 2.975, Tightening::UB ), + Tightening( 13, -4.175, Tightening::LB ), + + Tightening( 15, -0.2225, Tightening::LB ), + Tightening( 16, 2.975, Tightening::UB ), + Tightening( 16, -0.4175, Tightening::LB ), + + Tightening( 17, -11.452, Tightening::LB ), + Tightening( 17, 10.18, Tightening::UB ), + Tightening( 18, 0.87, Tightening::LB ), + Tightening( 18, 16.0688, Tightening::UB ), + + Tightening( 19, -1.1452, Tightening::LB ), + Tightening( 19, 10.18, Tightening::UB ), + Tightening( 20, 0.87, Tightening::LB ), + Tightening( 20, 16.0688, Tightening::UB ), + + Tightening( 21, -17.0684, Tightening::LB ), + Tightening( 21, 3.6767, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large 
); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ), + Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ), + Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ), + Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ), + Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ), + + Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ), + Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ), + + Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ), + Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ), + + Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 11, -5.6, 
Tightening::LB ), + Tightening( 11, 16.4636, Tightening::UB ), + Tightening( 12, -6.0286, Tightening::LB ), + Tightening( 13, -5.9, Tightening::LB ), + Tightening( 13, 8.0468, Tightening::UB ), + + Tightening( 15, -0.6029, Tightening::LB ), + Tightening( 16, -0.59, Tightening::LB ), + Tightening( 16, 8.0468, Tightening::UB ), + + Tightening( 17, -24.8864, Tightening::LB ), + Tightening( 17, 14.3076, Tightening::UB ), + Tightening( 18, 0.75, Tightening::LB ), + Tightening( 18, 28.0272, Tightening::UB ), + + Tightening( 19, -2.4886, Tightening::LB ), + Tightening( 19, 14.3076, Tightening::UB ), + Tightening( 20, 0.75, Tightening::LB ), + Tightening( 20, 28.0272, Tightening::UB ), + + Tightening( 21, -29.9648, Tightening::LB ), + Tightening( 21, 6.9619, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_sign() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, 
large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_sign2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + 
Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + 
Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_relu_and_bilinear() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReluAndBilinear( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ), + + Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ), + + Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 11, -2.4149, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + 
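+        // removeRedundancies() (another helper in this suite) appears to
+        // drop every reported tightening that does not strictly improve on
+        // the previously collected bounds, so expectedBounds2 below lists
+        // only the genuinely new tightenings from this pass.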
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+
+        // Change the current bounds
+        tableau.setLowerBound( 0, -3 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 2 );
+
+        double large = 1000000;
+        tableau.setLowerBound( 2, -large );
+        tableau.setUpperBound( 2, large );
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+        tableau.setLowerBound( 7, -large );
+        tableau.setUpperBound( 7, large );
+        tableau.setLowerBound( 8, -large );
+        tableau.setUpperBound( 8, large );
+        tableau.setLowerBound( 9, -large );
+        tableau.setUpperBound( 9, large );
+        tableau.setLowerBound( 10, -large );
+        tableau.setUpperBound( 10, large );
+        tableau.setLowerBound( 11, -large );
+        tableau.setUpperBound( 11, large );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds3( {
+            Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
+            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
+
+            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
+            Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
+
+            Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ),
+            Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ),
+
+            Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ),
+
+            Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
+
+
+        // Invoke PreimageApproximation
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds4( {
+            Tightening( 8, 7, Tightening::UB ),
+            Tightening( 9, 5.8235, Tightening::UB ),
+
+            Tightening( 10, -14, Tightening::LB ),
+
+            Tightening( 11, -24.8805, Tightening::LB ),
+            Tightening( 11, 14, Tightening::UB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
+        bounds = removeRedundancies( bounds, newBounds );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+    }
+
+    void test_preimage_approximation_relu_and_bilinear2()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+                                   "backward-preimage-approx" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkBackwardReluAndBilinear2( nlr, tableau );
+
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 1 );
+        tableau.setLowerBound( 2, -1 );
+        tableau.setUpperBound( 2, 1 );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds( {
+            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
+            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
+            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
+
+            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
+            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
+            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
+            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
+
+            Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ),
+            Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ),
+
+            Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ),
+            Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ),
+
+            Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds, newBounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+
+        // Invoke PreimageApproximation
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds2( {
+            Tightening( 12, -1.75, Tightening::LB ),
+
+            Tightening( 15, 0, Tightening::LB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
+        bounds = removeRedundancies( bounds, newBounds );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+
+        // Change the current bounds
+        tableau.setLowerBound( 0, -3 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 2 );
+        tableau.setLowerBound( 2, -2 );
+        tableau.setUpperBound( 2, 2 );
+
+        double large = 1000000;
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+        tableau.setLowerBound( 7, -large );
+        tableau.setUpperBound( 7, large );
+        tableau.setLowerBound( 8, -large );
+        tableau.setUpperBound( 8, large );
+        tableau.setLowerBound( 9, -large );
+        tableau.setUpperBound( 9, large );
+        tableau.setLowerBound( 10, -large );
+        tableau.setUpperBound( 10, large );
+        tableau.setLowerBound( 11, -large );
+        tableau.setUpperBound( 11, large );
+        tableau.setLowerBound( 12, -large );
+        tableau.setUpperBound( 12, large );
+        tableau.setLowerBound( 13, -large );
+        tableau.setUpperBound( 13, large );
+        tableau.setLowerBound( 14, -large );
+        tableau.setUpperBound( 14, large );
+        tableau.setLowerBound( 15, -large );
+        tableau.setUpperBound( 15, large );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds3( {
+            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
+            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
+            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
+            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
+
+            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
+            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
+            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
+            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
+
+            Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ),
+            Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ),
+
+            Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ),
+            Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ),
+
+            Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
+
+
+        // Invoke PreimageApproximation
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds4( {
+            Tightening( 11, -5, Tightening::LB ),
+            Tightening( 12, -4.6429, Tightening::LB ),
+
+            Tightening( 15, 0, Tightening::LB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
+        bounds = removeRedundancies( bounds, newBounds );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+    }
+
+    bool boundsEqual( const List<Tightening> &bounds, const List<Tightening> &expectedBounds )
+    {
+        if ( bounds.size() < expectedBounds.size() )
+            return false;
+
+        bool allFound = true;
+        for ( const auto &bound : bounds )
+        {
+            bool currentFound = false;
+            for ( const auto &expectedBound : expectedBounds )
+            {
+                currentFound |=
+                    ( bound._type == expectedBound._type &&
+                      bound._variable == expectedBound._variable &&
+                      FloatUtils::areEqual( bound._value, expectedBound._value, 0.0001 ) );
+            }
+            allFound &= currentFound;
+        }
+        return allFound;
+    }
+
+    // Create a list of the tightenings in newBounds that are genuinely new: a tightening
+    // is dropped if bounds contains a bound of the same variable and type that is at
+    // least as tight, or if newBounds contains a strictly tighter one.
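+    // For example, if bounds already contains the lower bound x5 >= 1 and newBounds
+    // contains x5 >= 0.5 and x5 <= 2, then x5 >= 0.5 is dropped (x5 >= 1 is at least
+    // as tight), while x5 <= 2 is kept because no competing upper bound on x5 exists.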
+    List<Tightening> removeRedundancies( const List<Tightening> &bounds,
+                                         const List<Tightening> &newBounds )
+    {
+        List<Tightening> minimalBounds;
+
+        for ( const auto &newBound : newBounds )
+        {
+            bool foundTighter = false;
+            for ( const auto &bound : bounds )
+            {
+                foundTighter |=
+                    ( newBound._type == bound._type && newBound._variable == bound._variable &&
+                      ( ( newBound._type == Tightening::LB &&
+                          FloatUtils::lte( newBound._value, bound._value, 0.0001 ) ) ||
+                        ( newBound._type == Tightening::UB &&
+                          FloatUtils::gte( newBound._value, bound._value, 0.0001 ) ) ) );
+            }
+
+            for ( const auto &bound : newBounds )
+            {
+                foundTighter |=
+                    ( newBound._type == bound._type && newBound._variable == bound._variable &&
+                      ( ( newBound._type == Tightening::LB &&
+                          FloatUtils::lt( newBound._value, bound._value, 0.0001 ) ) ||
+                        ( newBound._type == Tightening::UB &&
+                          FloatUtils::gt( newBound._value, bound._value, 0.0001 ) ) ) );
+            }
+
+            if ( !foundTighter )
+                minimalBounds.append( newBound );
+        }
+        return minimalBounds;
+    }
+
+    void updateTableau( MockTableau &tableau, List<Tightening> &tightenings )
+    {
+        for ( const auto &tightening : tightenings )
+        {
+            if ( tightening._type == Tightening::LB )
+            {
+                tableau.setLowerBound( tightening._variable, tightening._value );
+            }
+
+            if ( tightening._type == Tightening::UB )
+            {
+                tableau.setUpperBound( tightening._variable, tightening._value );
+            }
+        }
+    }
+};
diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h
index 569ce5fcd4..7c46ae4317 100644
--- a/src/nlr/tests/Test_NetworkLevelReasoner.h
+++ b/src/nlr/tests/Test_NetworkLevelReasoner.h
@@ -1936,12 +1936,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
     void populateNetworkBackwardReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
     {
         /*
-
-<<<<<<< HEAD
-              1      R      -1      R      -1
-=======
               1      R      -1      R      -1      2
->>>>>>> upstream/master
           x0 --- x2 ---> x4 --- x6 ---> x8 --- x10
             \    /  \    /  \    /
           1  \  /  2  \  /  1  \  /
@@ -1950,11 +1945,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
          -1  /  \  1  /  \  -1  /  \
             /    \  R  /    \  R  /    \
           x1 --- x3 ---> x5 --- x7 ---> x9 --- x11
-<<<<<<< HEAD
-              1     -1      2      2
-=======
               1     -1      2
->>>>>>> upstream/master
         The example described in Fig. 2 of https://dl.acm.org/doi/10.1145/3563325
@@ -2225,12 +2216,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
     void populateNetworkBackwardSigmoid( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
     {
         /*
-
-<<<<<<< HEAD
-              1      S      -1      S      -1
-=======
               1      S      -1      S      -1      2
->>>>>>> upstream/master
           x0 --- x2 ---> x4 --- x6 ---> x8 --- x10
             \    /  \    /  \    /
           1  \  /  2  \  /  1  \  /
@@ -2239,11 +2225,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
          -1  /  \  1  /  \  -1  /  \
             /    \  S  /    \  S  /    \
           x1 --- x3 ---> x5 --- x7 ---> x9 --- x11
-<<<<<<< HEAD
-              1     -1      2      2
-=======
               1     -1      2
->>>>>>> upstream/master
         The example described in Fig. 2 of https://dl.acm.org/doi/10.1145/3563325
@@ -2511,12 +2493,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
     void populateNetworkBackwardSign( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
     {
         /*
-
-<<<<<<< HEAD
-              1     Sign     -1     Sign     -1
-=======
               1     Sign     -1     Sign     -1      2
->>>>>>> upstream/master
          x0 --- x2 ---> x4 --- x6 ---> x8 --- x10
            \    /  \    /  \    /
          1  \  /  2  \  /  1  \  /
@@ -2525,11 +2502,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         -1  /  \  1  /  \  -1  /  \
            /    \  Sign  /  \  Sign  /  \
          x1 --- x3 ---> x5 --- x7 ---> x9 --- x11
-<<<<<<< HEAD
-              1     -1      2      2
-=======
               1     -1      2
->>>>>>> upstream/master
         The example described in Fig.
2 of https://dl.acm.org/doi/10.1145/3563325 @@ -2798,11 +2771,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite { /* -<<<<<<< HEAD - 1 Rnd -1 Rnd -1 -======= 1 Rnd -1 Rnd -1 2 ->>>>>>> upstream/master x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 \ / \ / \ / 1 \ / 2 \ / 1 \ / @@ -2811,11 +2780,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -1 / \ 1 / \ -1 / \ / \ Rnd / \ Rnd / \ x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 -<<<<<<< HEAD - 1 -1 2 2 -======= 1 -1 2 ->>>>>>> upstream/master The example described in Fig. 2 of https://dl.acm.org/doi/10.1145/3563325 @@ -3084,11 +3049,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite { /* -<<<<<<< HEAD - 1 A -1 A -1 -======= 1 A -1 A -1 2 ->>>>>>> upstream/master x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 \ / \ / \ / 1 \ / 2 \ / 1 \ / @@ -3097,11 +3058,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -1 / \ 1 / \ -1 / \ / \ A / \ A / \ x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 -<<<<<<< HEAD - 1 -1 2 2 -======= 1 -1 2 ->>>>>>> upstream/master The example described in Fig. 2 of https://dl.acm.org/doi/10.1145/3563325 @@ -3370,11 +3327,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite { /* -<<<<<<< HEAD - 1 LR -1 LR -1 -======= 1 LR -1 LR -1 2 ->>>>>>> upstream/master x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 \ / \ / \ / 1 \ / 2 \ / 1 \ / @@ -3383,11 +3336,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -1 / \ 1 / \ -1 / \ / \ LR / \ LR / \ x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 -<<<<<<< HEAD - 1 -1 2 2 -======= 1 -1 2 ->>>>>>> upstream/master The example described in Fig. 2 of https://dl.acm.org/doi/10.1145/3563325 @@ -4904,7 +4853,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); // case 2 input[0] = 1; @@ -4914,7 +4862,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } void test_store_into_other_with_softmax() @@ -4974,7 +4921,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); // case 2 input[0] = 1; @@ -4984,7 +4930,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } void test_generate_input_query() @@ -9934,3592 +9879,96 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( bounds.exists( bound ) ); } - void test_backwards_relu() + void test_get_previous_bias() { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardReLU( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - // Invoke 
DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ), - Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 8, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ), - - Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), - } ); + populateNetwork( nlr ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + // Generate query to 
create ReLU constraints + Query query; + nlr.generateQuery( query ); + // Find ReLU constraints from the query + List constraints = query.getPiecewiseLinearConstraints(); - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + for ( const auto &constraint : constraints ) + { + ReluConstraint *relu = dynamic_cast( constraint ); + TS_ASSERT( relu ); - List expectedBounds4( { - Tightening( 8, 0, Tightening::LB ), - Tightening( 9, 0, Tightening::LB ), - } ); + nlr.addConstraintInTopologicalOrder( relu ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + // First ReLU layer (nodes 2,0 through 2,2) has previous bias 1 + if ( relu->getB() == 3 || relu->getB() == 5 || relu->getB() == 7 ) + { + TS_ASSERT_EQUALS( nlr.getPreviousBias( relu ), 1 ); + } + // Second ReLU layer (nodes 4,0 and 4,1) has previous bias 2 + else if ( relu->getB() == 9 || relu->getB() == 11 ) + { + TS_ASSERT_EQUALS( nlr.getPreviousBias( relu ), 2 ); + } + } } - void test_backwards_relu2() + void test_get_previous_bias_error_handling() { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardReLU2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), - Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ), - - Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ), - Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), - Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ), - - Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ), - Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ), - - Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ), - Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ), - - Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, 
expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 14, 0, Tightening::LB ), - Tightening( 15, 0, Tightening::LB ), - Tightening( 16, 0, Tightening::LB ), - - Tightening( 19, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), - Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), - Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ), - - Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ), - Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ), - Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ), - - 
Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ), - Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ), - - Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ), - Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ), - - Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - + populateNetwork( nlr ); - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + // Generate invalid ReLU constraint + ReluConstraint invalidRelu( 15, 16 ); // Variables not in network - List expectedBounds4( { - Tightening( 7, 0, Tightening::LB ), + // Should throw since variables don't exist in network + TS_ASSERT_THROWS_EQUALS( nlr.getPreviousBias( &invalidRelu ), + const NLRError &e, + e.getCode(), + NLRError::RELU_NOT_FOUND ); - Tightening( 14, 0, Tightening::LB ), - Tightening( 15, 0, Tightening::LB ), - Tightening( 16, 0, Tightening::LB ), + // Test missing activation source using fresh network + NLR::NetworkLevelReasoner nlrNoActivations; + // Create minimal network without activation sources + nlrNoActivations.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlrNoActivations.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlrNoActivations.addLayer( 2, NLR::Layer::RELU, 2 ); - Tightening( 20, 0, Tightening::LB ), - } ); + ReluConstraint missingActivation( 2, 3 ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + TS_ASSERT_THROWS_EQUALS( nlrNoActivations.getPreviousBias( &missingActivation ), + const NLRError &e, + e.getCode(), + NLRError::RELU_NOT_FOUND ); } - void test_backwards_sigmoid() + bool boundsEqual( const List &bounds, const List &expectedBounds ) { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSigmoid( nlr, tableau ); + if ( bounds.size() < expectedBounds.size() ) + return false; - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); + bool allFound = true; + for ( const auto &bound : bounds ) + { + bool currentFound = false; + for ( const auto &expectedBound : expectedBounds ) + { + currentFound |= + ( bound._type == expectedBound._type && + bound._variable == expectedBound._variable && + FloatUtils::areEqual( bound._value, expectedBound._value, 0.0001 ) ); + } + allFound &= currentFound; + } + return allFound; + } - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, 0.2689, Tightening::LB ), Tightening( 4, 0.7311, Tightening::UB ), - Tightening( 5, 0.5, Tightening::LB ), Tightening( 5, 0.8808, Tightening::UB ), - - Tightening( 6, -0.1261, Tightening::LB ), Tightening( 6, 0.5069, Tightening::UB ), - 
Tightening( 7, -0.2379, Tightening::LB ), Tightening( 7, 0.8571, Tightening::UB ), - - Tightening( 8, 0.4685, Tightening::LB ), Tightening( 8, 0.6241, Tightening::UB ), - Tightening( 9, 0.4408, Tightening::LB ), Tightening( 9, 0.7021, Tightening::UB ), - - Tightening( 10, -1.1819, Tightening::LB ), Tightening( 10, -1.0535, Tightening::UB ), - Tightening( 11, 3.4986, Tightening::LB ), Tightening( 11, 3.8797, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, 0.0066, Tightening::LB ), Tightening( 4, 0.8807, Tightening::UB ), - Tightening( 5, 0.0179, Tightening::LB ), Tightening( 5, 0.9526, Tightening::UB ), - - Tightening( 6, -0.8362, Tightening::LB ), Tightening( 6, 0.9193, Tightening::UB ), - Tightening( 7, -0.8860, Tightening::LB ), Tightening( 7, 1.6904, Tightening::UB ), - - Tightening( 8, 0.3023, Tightening::LB ), Tightening( 8, 0.7148, Tightening::UB ), - Tightening( 9, 0.2919, Tightening::LB ), Tightening( 9, 0.8443, Tightening::UB ), - - Tightening( 10, -1.2694, Tightening::LB ), Tightening( 10, -0.8841, Tightening::UB ), - Tightening( 11, 3.2396, Tightening::LB ), Tightening( 11, 4.05, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_sigmoid2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - 
Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSigmoid2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), - Tightening( 8, 0.0474, Tightening::LB ), Tightening( 8, 0.9526, Tightening::UB ), - Tightening( 9, 0.0474, Tightening::LB ), Tightening( 9, 0.9526, Tightening::UB ), - Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9975, Tightening::UB ), - - Tightening( 11, 1.3787, Tightening::LB ), Tightening( 11, 4.6213, Tightening::UB ), - Tightening( 12, -0.5636, Tightening::LB ), Tightening( 12, 2.5636, Tightening::UB ), - Tightening( 13, -2.3771, Tightening::LB ), Tightening( 13, 0.3771, Tightening::UB ), - - Tightening( 14, 0.7988, Tightening::LB ), Tightening( 14, 0.9903, Tightening::UB ), - Tightening( 15, 0.3627, Tightening::LB ), Tightening( 15, 0.9285, Tightening::UB ), - Tightening( 16, 0.0849, Tightening::LB ), Tightening( 16, 0.5932, Tightening::UB ), - - Tightening( 17, -1.2113, Tightening::LB ), Tightening( 17, 0.0354, Tightening::UB ), - Tightening( 18, 1.4027, Tightening::LB ), Tightening( 18, 2.4177, Tightening::UB ), - - Tightening( 19, 0.2295, Tightening::LB ), Tightening( 19, 0.5088, Tightening::UB ), - Tightening( 20, 0.8026, Tightening::LB ), Tightening( 20, 0.9182, Tightening::UB ), - - Tightening( 21, -1.6539, Tightening::LB ), Tightening( 21, -1.3393, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, 
-large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.9933, Tightening::UB ), - Tightening( 8, 0.0067, Tightening::LB ), Tightening( 8, 0.9933, Tightening::UB ), - Tightening( 9, 0.0025, Tightening::LB ), Tightening( 9, 0.9933, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9991, Tightening::UB ), - - Tightening( 11, 1.2517, Tightening::LB ), Tightening( 11, 4.9701, Tightening::UB ), - Tightening( 12, -0.9599, Tightening::LB ), Tightening( 12, 2.8466, Tightening::UB ), - Tightening( 13, -2.8147, Tightening::LB ), Tightening( 13, 0.9188, Tightening::UB ), - - Tightening( 14, 0.7776, Tightening::LB ), Tightening( 14, 0.9931, Tightening::UB ), - Tightening( 15, 0.2769, Tightening::LB ), Tightening( 15, 0.9451, Tightening::UB ), - Tightening( 16, 0.0565, Tightening::LB ), Tightening( 16, 0.7148, Tightening::UB ), - - Tightening( 17, -1.4307, Tightening::LB ), Tightening( 17, 0.1083, Tightening::UB ), - Tightening( 18, 1.2592, Tightening::LB ), Tightening( 18, 2.5519, Tightening::UB ), - - Tightening( 19, 0.1929, Tightening::LB ), Tightening( 19, 0.5270, Tightening::UB ), - Tightening( 20, 0.7789, Tightening::LB ), Tightening( 20, 0.9277, Tightening::UB ), - - Tightening( 21, -1.6967, Tightening::LB ), Tightening( 21, -1.3115, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_abs() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardAbs( nlr, tableau ); - - tableau.setLowerBound( 0, 0 
); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), - - Tightening( 10, -4, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, 2, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 4, Tightening::UB ), - - Tightening( 6, -5, Tightening::LB ), Tightening( 6, 4, Tightening::UB ), - Tightening( 7, -4, Tightening::LB ), Tightening( 7, 10, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 10, Tightening::UB ), - - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, 2, Tightening::LB ), Tightening( 11, 27, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, 
expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_abs2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardAbs2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -4, Tightening::LB ), Tightening( 11, 9, Tightening::UB ), - Tightening( 12, -2, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), - Tightening( 13, -5, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), - - Tightening( 14, 0, Tightening::LB ), Tightening( 14, 9, Tightening::UB ), - Tightening( 15, 0, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), - Tightening( 16, 0, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), - - Tightening( 17, -15, Tightening::LB ), Tightening( 17, 12, Tightening::UB ), - Tightening( 18, 0, Tightening::LB ), Tightening( 18, 27, Tightening::UB ), - - Tightening( 19, 0, Tightening::LB ), Tightening( 19, 15, Tightening::UB ), - Tightening( 20, 0, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), - - Tightening( 21, -28, Tightening::LB ), Tightening( 21, 14, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, 
-large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 15, Tightening::UB ), - - Tightening( 11, -13, Tightening::LB ), Tightening( 11, 18, Tightening::UB ), - Tightening( 12, -5, Tightening::LB ), Tightening( 12, 25, Tightening::UB ), - Tightening( 13, -7, Tightening::LB ), Tightening( 13, 15, Tightening::UB ), - - Tightening( 14, 0, Tightening::LB ), Tightening( 14, 18, Tightening::UB ), - Tightening( 15, 0, Tightening::LB ), Tightening( 15, 25, Tightening::UB ), - Tightening( 16, 0, Tightening::LB ), Tightening( 16, 15, Tightening::UB ), - - Tightening( 17, -33, Tightening::LB ), Tightening( 17, 25, Tightening::UB ), - Tightening( 18, 0, Tightening::LB ), Tightening( 18, 58, Tightening::UB ), - - Tightening( 19, 0, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), - Tightening( 20, 0, Tightening::LB ), Tightening( 20, 58, Tightening::UB ), - - Tightening( 21, -59, Tightening::LB ), Tightening( 21, 32, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_round() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - 
Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardRound( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), - Tightening( 7, -4, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 6.5, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 11, -4, Tightening::LB ), - Tightening( 11, 6, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, -3, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), - Tightening( 7, -10.5, Tightening::LB ), Tightening( 7, 5.5, Tightening::UB ), - - Tightening( 8, -3, Tightening::LB ), Tightening( 8, 5, 
Tightening::UB ), - Tightening( 9, -10, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), - - Tightening( 10, -3, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - Tightening( 11, -15.5, Tightening::LB ), Tightening( 11, 11.5, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 7, -10, Tightening::LB ), - Tightening( 7, 5, Tightening::UB ), - - Tightening( 11, -14, Tightening::LB ), - Tightening( 11, 11, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_round2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardRound2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -11, Tightening::LB ), Tightening( 11, 15, Tightening::UB ), - Tightening( 12, -10, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), - Tightening( 13, -7, Tightening::LB ), Tightening( 13, 3, Tightening::UB ), - - Tightening( 14, -11, Tightening::LB ), Tightening( 14, 15, Tightening::UB ), - Tightening( 15, -10, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), - Tightening( 16, -7, Tightening::LB ), Tightening( 16, 3, Tightening::UB ), - - Tightening( 17, -27.5, Tightening::LB ), Tightening( 17, 27.5, Tightening::UB ), - Tightening( 18, -16.5, Tightening::LB ), Tightening( 18, 16.5, Tightening::UB ), - - Tightening( 19, -28, Tightening::LB ), Tightening( 19, 28, Tightening::UB ), - Tightening( 20, -17, Tightening::LB ), Tightening( 20, 17, Tightening::UB ), - - Tightening( 21, -38, Tightening::LB ), Tightening( 21, 36, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - 
-    void test_backwards_round2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-converge" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardRound2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-            Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
-            Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-
-            Tightening( 11, -11, Tightening::LB ), Tightening( 11, 15, Tightening::UB ),
-            Tightening( 12, -10, Tightening::LB ), Tightening( 12, 10, Tightening::UB ),
-            Tightening( 13, -7, Tightening::LB ), Tightening( 13, 3, Tightening::UB ),
-
-            Tightening( 14, -11, Tightening::LB ), Tightening( 14, 15, Tightening::UB ),
-            Tightening( 15, -10, Tightening::LB ), Tightening( 15, 10, Tightening::UB ),
-            Tightening( 16, -7, Tightening::LB ), Tightening( 16, 3, Tightening::UB ),
-
-            Tightening( 17, -27.5, Tightening::LB ), Tightening( 17, 27.5, Tightening::UB ),
-            Tightening( 18, -16.5, Tightening::LB ), Tightening( 18, 16.5, Tightening::UB ),
-
-            Tightening( 19, -28, Tightening::LB ), Tightening( 19, 28, Tightening::UB ),
-            Tightening( 20, -17, Tightening::LB ), Tightening( 20, 17, Tightening::UB ),
-
-            Tightening( 21, -38, Tightening::LB ), Tightening( 21, 36, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
-            Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-
-            Tightening( 11, -13, Tightening::LB ), Tightening( 11, 30, Tightening::UB ),
-            Tightening( 12, -23, Tightening::LB ), Tightening( 12, 12, Tightening::UB ),
-            Tightening( 13, -9, Tightening::LB ), Tightening( 13, 6, Tightening::UB ),
-
-            Tightening( 14, -13, Tightening::LB ), Tightening( 14, 30, Tightening::UB ),
-            Tightening( 15, -23, Tightening::LB ), Tightening( 15, 12, Tightening::UB ),
-            Tightening( 16, -9, Tightening::LB ), Tightening( 16, 6, Tightening::UB ),
-
-            Tightening( 17, -57.5, Tightening::LB ), Tightening( 17, 32.5, Tightening::UB ),
-            Tightening( 18, -23.5, Tightening::LB ), Tightening( 18, 26.5, Tightening::UB ),
-
-            Tightening( 19, -58, Tightening::LB ), Tightening( 19, 33, Tightening::UB ),
-            Tightening( 20, -24, Tightening::LB ), Tightening( 20, 27, Tightening::UB ),
-
-            Tightening( 21, -74, Tightening::LB ), Tightening( 21, 44, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
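Between the DeepPoly round and the backward LP round, each test feeds the discovered tightenings back into the tableau through updateTableau(), which is also defined outside this hunk. Its observable job is just to install each tightening as the variable's new bound; a plausible sketch, assuming the MockTableau setters used throughout these tests:

    void updateTableau( MockTableau &tableau, List<Tightening> &tightenings )
    {
        // Install every discovered tightening, so that the next propagation
        // round starts from the tightened bounds rather than the originals.
        for ( const auto &tightening : tightenings )
        {
            if ( tightening._type == Tightening::LB )
                tableau.setLowerBound( tightening._variable, tightening._value );
            else
                tableau.setUpperBound( tightening._variable, tightening._value );
        }
    }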
-    void test_backwards_sign()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-converge" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSign( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ),
-
-            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-
-            Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ),
-            Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ),
-
-            Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ),
-
-            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-
-            Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ),
-            Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
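The Sign expectations above follow a simple interval rule: the output lives in [-1, 1], and collapses to a point once the sign of the source is fixed (e.g. variable 5 at [1, 1] in the first round, where its source is non-negative). For reference, a hedged sketch of that rule; signOutputBounds is an illustrative helper, not part of the NLR API:

    // Concrete interval rule for y = sign(x) with x in [sourceLb, sourceUb],
    // using the convention sign(0) = 1.
    void signOutputBounds( double sourceLb, double sourceUb, double &lb, double &ub )
    {
        lb = ( sourceLb >= 0 ) ? 1 : -1; // source always non-negative => y = 1
        ub = ( sourceUb >= 0 ) ? 1 : -1; // source always negative => y = -1
    }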
-    void test_backwards_sign2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-converge" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSign2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-            Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ),
-
-            Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ),
-            Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ),
-            Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ),
-
-            Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ),
-            Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ),
-            Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ),
-
-            Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ),
-            Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ),
-
-            Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ),
-            Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ),
-
-            Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-            Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ),
-
-            Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ),
-            Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ),
-            Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ),
-
-            Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ),
-            Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ),
-            Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ),
-
-            Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ),
-            Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ),
-
-            Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ),
-            Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ),
-
-            Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_backwards_leaky_relu()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-converge" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardLeakyReLU( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
-
-            Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ),
-            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-
-            Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ),
-            Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 4, -0.1, Tightening::LB ),
-
-            Tightening( 8, -0.045, Tightening::LB ),
-            Tightening( 9, -0.3, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-
-            Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ),
-            Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ),
-
-            Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ),
-            Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ),
-
-            Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ),
-            Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 4, -0.5, Tightening::LB ),
-            Tightening( 5, -0.4, Tightening::LB ),
-
-            Tightening( 8, -0.4571, Tightening::LB ),
-            Tightening( 9, -1.1057, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
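The fractional expectations here (e.g. -0.45 and -2.025, with the LP round tightening to exactly one tenth of earlier values) are consistent with a LeakyReLU slope of alpha = 0.1, which populateNetworkBackwardLeakyReLU presumably installs. For reference, a sketch of the chord-style upper relaxation for the unfixed case l < 0 < u; this is the textbook DeepPoly-style bound, not necessarily the reasoner's exact code path:

    // Linear upper bound for y = LeakyReLU(x) = max( x, alpha * x ), alpha in (0,1),
    // on x in [l, u] with l < 0 < u: the chord from (l, alpha * l) to (u, u).
    // y >= x and y >= alpha * x remain sound linear lower bounds.
    void leakyReluUpperRelaxation( double l, double u, double alpha,
                                   double &upperSlope, double &upperBias )
    {
        upperSlope = ( u - alpha * l ) / ( u - l );
        upperBias = u - upperSlope * u; // chord passes through ( u, u )
    }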
-    void test_backwards_leaky_relu2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-converge" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardLeakyRelu2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-            Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
-            Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-
-            Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ),
-            Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ),
-            Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ),
-
-            Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ),
-            Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ),
-            Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ),
-
-            Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ),
-            Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ),
-
-            Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ),
-            Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ),
-
-            Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 7, -0.2, Tightening::LB ),
-            Tightening( 8, -0.3, Tightening::LB ),
-            Tightening( 9, -0.3, Tightening::LB ),
-            Tightening( 10, -0.6, Tightening::LB ),
-
-            Tightening( 14, -0.9, Tightening::LB ),
-            Tightening( 15, -0.89, Tightening::LB ),
-            Tightening( 16, -0.77, Tightening::LB ),
-
-            Tightening( 19, -2.3133, Tightening::LB ),
-            Tightening( 20, -1.2, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
-            Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-
-            Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ),
-            Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ),
-            Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ),
-
-            Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ),
-            Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ),
-            Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ),
-
-            Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ),
-            Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ),
-
-            Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ),
-            Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ),
-
-            Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 7, -0.2, Tightening::LB ),
-            Tightening( 8, -0.5, Tightening::LB ),
-            Tightening( 9, -0.6, Tightening::LB ),
-            Tightening( 10, -1.5, Tightening::LB ),
-
-            Tightening( 14, -1.1, Tightening::LB ),
-            Tightening( 15, -2.1771, Tightening::LB ),
-            Tightening( 16, -1.15, Tightening::LB ),
-
-            Tightening( 19, -5.6259, Tightening::LB ),
-            Tightening( 20, -1.9, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_backwards_softmax_and_max()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-converge" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSoftmaxAndMax( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
-
-            Tightening( 3, 0.2119, Tightening::LB ), Tightening( 3, 0.8756, Tightening::UB ),
-            Tightening( 5, 0.0049, Tightening::LB ), Tightening( 5, 0.6652, Tightening::UB ),
-            Tightening( 7, 0.0634, Tightening::LB ), Tightening( 7, 0.4955, Tightening::UB ),
-
-            Tightening( 8, -0.1519, Tightening::LB ), Tightening( 8, 1.4775, Tightening::UB ),
-            Tightening( 9, 0.6614, Tightening::LB ), Tightening( 9, 2.3899, Tightening::UB ),
-
-            Tightening( 10, 0.6614, Tightening::LB ), Tightening( 10, 2.3899, Tightening::UB ),
-
-            Tightening( 11, -2.3899, Tightening::LB ), Tightening( 11, -0.6614, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-
-            Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ),
-            Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ),
-
-            Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ),
-            Tightening( 9, 0.0674, Tightening::LB ), Tightening( 9, 2.9934, Tightening::UB ),
-
-            Tightening( 10, 0.0674, Tightening::LB ), Tightening( 10, 2.9934, Tightening::UB ),
-
-            Tightening( 11, -2.9934, Tightening::LB ), Tightening( 11, -0.0674, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
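The softmax expectations above match the standard monotonicity-based interval rule: an output is smallest when its own input sits at its lower bound and every rival input sits at its upper bound. With the DeepPoly inputs x2 in [1, 2], x4 in [-3, 2], x6 in [0, 1], the lower bound on output 3 works out to e^1 / ( e^1 + e^2 + e^1 ) ~ 0.2119, matching the list. A hedged sketch of that rule (illustrative helper; needs <cmath>, and Marabou's Vector):

    // Lower bound on softmax output i, given elementwise input bounds.
    // The upper bound is symmetric: own input at max, rivals at min.
    double softmaxOutputLb( const Vector<double> &inputLbs,
                            const Vector<double> &inputUbs, unsigned i )
    {
        double denominator = 0;
        for ( unsigned j = 0; j < inputUbs.size(); ++j )
            denominator += ( j == i ) ? std::exp( inputLbs[i] ) : std::exp( inputUbs[j] );
        return std::exp( inputLbs[i] ) / denominator;
    }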
-    void test_backwards_softmax_and_max2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-converge" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSoftmaxAndMax2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, 0.0003, Tightening::LB ), Tightening( 7, 0.9864, Tightening::UB ),
-            Tightening( 8, 0.0001, Tightening::LB ), Tightening( 8, 0.9907, Tightening::UB ),
-            Tightening( 9, 0.0001, Tightening::LB ), Tightening( 9, 0.9907, Tightening::UB ),
-            Tightening( 10, 0.0001, Tightening::LB ), Tightening( 10, 0.9994, Tightening::UB ),
-
-            Tightening( 11, 1.0013, Tightening::LB ), Tightening( 11, 4.9635, Tightening::UB ),
-            Tightening( 12, -0.9861, Tightening::LB ), Tightening( 12, 2.9806, Tightening::UB ),
-            Tightening( 13, -2.9902, Tightening::LB ), Tightening( 13, 0.9678, Tightening::UB ),
-
-            Tightening( 14, 0.1086, Tightening::LB ), Tightening( 14, 0.9971, Tightening::UB ),
-            Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8766, Tightening::UB ),
-            Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4595, Tightening::UB ),
-
-            Tightening( 17, 0.1086, Tightening::LB ), Tightening( 17, 0.9971, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, 0.0001, Tightening::LB ), Tightening( 7, 0.9999, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 0.9991, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 0.9990, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9999, Tightening::UB ),
-
-            Tightening( 11, 1.0003, Tightening::LB ), Tightening( 11, 4.9989, Tightening::UB ),
-            Tightening( 12, -0.9999, Tightening::LB ), Tightening( 12, 2.9979, Tightening::UB ),
-            Tightening( 13, -2.9989, Tightening::LB ), Tightening( 13, 0.9980, Tightening::UB ),
-
-            Tightening( 14, 0.1067, Tightening::LB ), Tightening( 14, 0.9972, Tightening::UB ),
-            Tightening( 15, 0.0024, Tightening::LB ), Tightening( 15, 0.8786, Tightening::UB ),
-            Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4677, Tightening::UB ),
-
-            Tightening( 17, 0.1067, Tightening::LB ), Tightening( 17, 0.9972, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_backwards_relu_and_bilinear()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-converge" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardReluAndBilinear( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
-
-            Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ),
-
-            Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ),
-
-            Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-
-            Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ),
-            Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ),
-
-            Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ),
-
-            Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 7, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
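The bilinear expectations above follow the plain interval product: for z = x * y over a box, the extremes sit at the box's corners. With x8, x9 both in [-2, 8] (as in expectedBounds3), the candidate corner products are { 4, -16, -16, 64 }, giving exactly the [-16, 64] listed for variable 10. A hedged sketch of that rule (illustrative helper; needs <algorithm>):

    // Interval bounds for a bilinear neuron z = x * y: evaluate the four
    // corners of [xl, xu] x [yl, yu] and take the min and max.
    void bilinearBounds( double xl, double xu, double yl, double yu,
                         double &lb, double &ub )
    {
        double corners[4] = { xl * yl, xl * yu, xu * yl, xu * yu };
        lb = *std::min_element( corners, corners + 4 );
        ub = *std::max_element( corners, corners + 4 );
    }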
-    void test_backwards_relu_and_bilinear2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-converge" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardReluAndBilinear2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-
-            Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ),
-            Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ),
-
-            Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ),
-            Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ),
-
-            Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 13, 0, Tightening::LB ),
-            Tightening( 14, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-
-            Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ),
-            Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ),
-
-            Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ),
-            Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ),
-
-            Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke backward LP propagation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 7, 0, Tightening::LB ),
-
-            Tightening( 13, 0, Tightening::LB ),
-            Tightening( 14, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_preimage_approximation_relu()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-preimage-approx" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardReLU( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
-
-            Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-
-            Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ),
-            Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds, newBounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PreimageApproximation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 10, 0, Tightening::UB ),
-            Tightening( 11, 1.625, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( bounds, newBounds );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-
-            Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ),
-            Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ),
-
-            Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ),
-
-            Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ),
-            Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PreimageApproximation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 11, 0.8472, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( bounds, newBounds );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
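Unlike the backward-converge tests, the preimage-approximation tests filter the second round's output through removeRedundancies() before asserting, keeping only what the new round actually added. That helper is defined outside this hunk; judging purely from its name and usage, a plausible sketch (assuming the same Tightening fields and FloatUtils tolerance as above):

    List<Tightening> removeRedundancies( const List<Tightening> &bounds,
                                         const List<Tightening> &newBounds )
    {
        // Drop any tightening that already appeared, unchanged, in the
        // previous round, so the assertion only sees genuinely new bounds.
        List<Tightening> result;
        for ( const auto &tightening : newBounds )
        {
            bool redundant = false;
            for ( const auto &previous : bounds )
            {
                if ( previous._variable == tightening._variable &&
                     previous._type == tightening._type &&
                     FloatUtils::areEqual( previous._value, tightening._value, 0.0001 ) )
                {
                    redundant = true;
                    break;
                }
            }
            if ( !redundant )
                result.append( tightening );
        }
        return result;
    }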
-    void test_preimage_approximation_relu2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-preimage-approx" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardReLU2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-
-            Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ),
-            Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ),
-            Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ),
-
-            Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ),
-            Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ),
-            Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ),
-
-            Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ),
-            Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ),
-
-            Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ),
-            Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ),
-
-            Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds, newBounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PreimageApproximation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 12, -1.75, Tightening::LB ),
-            Tightening( 13, -4.25, Tightening::LB ),
-            Tightening( 13, 3.25, Tightening::UB ),
-
-            Tightening( 17, -11.1417, Tightening::LB ),
-            Tightening( 17, 10, Tightening::UB ),
-
-            Tightening( 21, -17.3084, Tightening::LB ),
-            Tightening( 21, 3.2160, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( bounds, newBounds );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-
-            Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ),
-            Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ),
-            Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ),
-
-            Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ),
-            Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ),
-            Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ),
-
-            Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ),
-            Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ),
-
-            Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ),
-            Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ),
-
-            Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PreimageApproximation
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 11, -5, Tightening::LB ),
-            Tightening( 12, -4.6429, Tightening::LB ),
-            Tightening( 13, 8.5519, Tightening::UB ),
-
-            Tightening( 17, -23.6231, Tightening::LB ),
-            Tightening( 17, 14.0909, Tightening::UB ),
-            Tightening( 18, 2, Tightening::LB ),
-            Tightening( 18, 28.2015, Tightening::UB ),
-
-            Tightening( 21, -29.2015, Tightening::LB ),
-            Tightening( 21, 6.5734, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( bounds, newBounds );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), - } ); - - List bounds, newBounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PreimageApproximation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 7, -2, Tightening::LB ), - - Tightening( 10, -1.8, Tightening::LB ), - Tightening( 10, 0, Tightening::UB ), - - Tightening( 11, 1.4542, Tightening::LB ), - Tightening( 11, 1.4542, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); - bounds = removeRedundancies( bounds, newBounds ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), - Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), - - Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), - Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), - - Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), - Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PreimageApproximation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - 
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 6, -2, Tightening::LB ), - Tightening( 6, 3.1, Tightening::UB ), - Tightening( 7, -3.2, Tightening::LB ), - Tightening( 7, 4, Tightening::UB ), - - Tightening( 8, 3.1, Tightening::UB ), - Tightening( 9, -0.32, Tightening::LB ), - - Tightening( 10, -3.8726, Tightening::LB ), - Tightening( 10, 0.03, Tightening::UB ), - Tightening( 11, 0.4074, Tightening::LB ), - Tightening( 11, 11.3243, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); - bounds = removeRedundancies( bounds, newBounds ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_preimage_approximation_leaky_relu2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-preimage-approx" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardLeakyRelu2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ), - Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ), - Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ), - - Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ), - Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ), - Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ), - - Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ), - Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ), - - Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ), - Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ), - - Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ), - } ); - - List bounds, newBounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PreimageApproximation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 11, -4.5, Tightening::LB ), - Tightening( 11, 8.5, Tightening::UB ), - Tightening( 12, -2.225, 
Tightening::LB ), - Tightening( 13, 2.975, Tightening::UB ), - Tightening( 13, -4.175, Tightening::LB ), - - Tightening( 15, -0.2225, Tightening::LB ), - Tightening( 16, 2.975, Tightening::UB ), - Tightening( 16, -0.4175, Tightening::LB ), - - Tightening( 17, -11.452, Tightening::LB ), - Tightening( 17, 10.18, Tightening::UB ), - Tightening( 18, 0.87, Tightening::LB ), - Tightening( 18, 16.0688, Tightening::UB ), - - Tightening( 19, -1.1452, Tightening::LB ), - Tightening( 19, 10.18, Tightening::UB ), - Tightening( 20, 0.87, Tightening::LB ), - Tightening( 20, 16.0688, Tightening::UB ), - - Tightening( 21, -17.0684, Tightening::LB ), - Tightening( 21, 3.6767, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); - bounds = removeRedundancies( bounds, newBounds ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ), - Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ), - Tightening( 13, 
-11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ), - - Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ), - Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ), - Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ), - - Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ), - Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ), - - Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ), - Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ), - - Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PreimageApproximation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 11, -5.6, Tightening::LB ), - Tightening( 11, 16.4636, Tightening::UB ), - Tightening( 12, -6.0286, Tightening::LB ), - Tightening( 13, -5.9, Tightening::LB ), - Tightening( 13, 8.0468, Tightening::UB ), - - Tightening( 15, -0.6029, Tightening::LB ), - Tightening( 16, -0.59, Tightening::LB ), - Tightening( 16, 8.0468, Tightening::UB ), - - Tightening( 17, -24.8864, Tightening::LB ), - Tightening( 17, 14.3076, Tightening::UB ), - Tightening( 18, 0.75, Tightening::LB ), - Tightening( 18, 28.0272, Tightening::UB ), - - Tightening( 19, -2.4886, Tightening::LB ), - Tightening( 19, 14.3076, Tightening::UB ), - Tightening( 20, 0.75, Tightening::LB ), - Tightening( 20, 28.0272, Tightening::UB ), - - Tightening( 21, -29.9648, Tightening::LB ), - Tightening( 21, 6.9619, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); - bounds = removeRedundancies( bounds, newBounds ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_preimage_approximation_sign() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-preimage-approx" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSign( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, 
Tightening::UB ), - } ); - - List bounds, newBounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PreimageApproximation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); - bounds = removeRedundancies( bounds, newBounds ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PreimageApproximation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); - bounds = removeRedundancies( bounds, newBounds ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_preimage_approximation_sign2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-preimage-approx" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSign2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - 
tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), - Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), - Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), - - Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), - Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), - Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), - - Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), - Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), - - Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), - Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), - - Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), - } ); - - List bounds, newBounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PreimageApproximation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); - bounds = removeRedundancies( bounds, newBounds ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - 
tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), - Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), - Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), - - Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), - Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), - Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), - - Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), - Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), - - Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), - Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), - - Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PreimageApproximation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); - bounds = removeRedundancies( bounds, newBounds ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_preimage_approximation_relu_and_bilinear() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-preimage-approx" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardReluAndBilinear( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 
6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - - Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ), - - Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ), - - Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ), - } ); - - List bounds, newBounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PreimageApproximation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 11, -2.4149, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); - bounds = removeRedundancies( bounds, newBounds ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), - Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), - - Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), - - Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PreimageApproximation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 8, 7, Tightening::UB ), - 
Tightening( 9, 5.8235, Tightening::UB ), - - Tightening( 10, -14, Tightening::LB ), - - Tightening( 11, -24.8805, Tightening::LB ), - Tightening( 11, 14, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); - bounds = removeRedundancies( bounds, newBounds ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_preimage_approximation_relu_and_bilinear2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-preimage-approx" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardReluAndBilinear2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), - - Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ), - Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ), - - Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ), - } ); - - List bounds, newBounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PreimageApproximation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 12, -1.75, Tightening::LB ), - - Tightening( 15, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); - bounds = removeRedundancies( bounds, newBounds ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); 
- tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), - Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), - - Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ), - Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ), - - Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PreimageApproximation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 11, -5, Tightening::LB ), - Tightening( 12, -4.6429, Tightening::LB ), - - Tightening( 15, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); - bounds = removeRedundancies( bounds, newBounds ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - bool boundsEqual( const List &bounds, const List &expectedBounds ) - { - if ( bounds.size() < expectedBounds.size() ) -======= - bool boundsEqual( const List &bounds, const List &expectedBounds ) - { - if ( bounds.size() != expectedBounds.size() ) ->>>>>>> upstream/master - return false; - - bool allFound = true; - for ( const auto &bound : bounds ) - { - bool currentFound = false; - for ( const auto &expectedBound : expectedBounds ) - { - currentFound |= - ( bound._type == expectedBound._type && - bound._variable == expectedBound._variable && - FloatUtils::areEqual( bound._value, expectedBound._value, 0.0001 ) ); - } - allFound &= currentFound; - } - return allFound; - } - -<<<<<<< HEAD - - // Create list of all tightenings in newBounds for which there is no bound in newBounds or in - // bounds which is at least as tight. 
-    List<Tightening> removeRedundancies( const List<Tightening> &bounds,
-                                         const List<Tightening> &newBounds )
-    {
-        List<Tightening> minimalBounds;
-
-        for ( const auto &newBound : newBounds )
-        {
-            bool foundTighter = false;
-            for ( const auto &bound : bounds )
-            {
-                foundTighter |=
-                    ( newBound._type == bound._type && newBound._variable == bound._variable &&
-                      ( ( newBound._type == Tightening::LB &&
-                          FloatUtils::lte( newBound._value, bound._value, 0.0001 ) ) ||
-                        ( newBound._type == Tightening::UB &&
-                          FloatUtils::gte( newBound._value, bound._value, 0.0001 ) ) ) );
-            }
-
-            for ( const auto &bound : newBounds )
-            {
-                foundTighter |=
-                    ( newBound._type == bound._type && newBound._variable == bound._variable &&
-                      ( ( newBound._type == Tightening::LB &&
-                          FloatUtils::lt( newBound._value, bound._value, 0.0001 ) ) ||
-                        ( newBound._type == Tightening::UB &&
-                          FloatUtils::gt( newBound._value, bound._value, 0.0001 ) ) ) );
-            }
-
-            if ( !foundTighter )
-                minimalBounds.append( newBound );
-        }
-        return minimalBounds;
-    }
-
-    void updateTableau( MockTableau &tableau, List<Tightening> &tightenings )
-    {
-=======
-    void updateTableau( MockTableau &tableau, List<Tightening> &tightenings )
-    {
-        ASSERT( tableau );
->>>>>>> upstream/master
-        for ( const auto &tightening : tightenings )
-        {
-            if ( tightening._type == Tightening::LB )
-            {
-                tableau.setLowerBound( tightening._variable, tightening._value );
-            }
+    void updateTableau( MockTableau &tableau, List<Tightening> &tightenings )
+    {
+        for ( const auto &tightening : tightenings )
+        {
+            if ( tightening._type == Tightening::LB )
+            {
+                tableau.setLowerBound( tightening._variable, tightening._value );
+            }
 
             if ( tightening._type == Tightening::UB )
             {
@@ -13527,68 +9976,4 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
             }
         }
     }
-<<<<<<< HEAD
-=======
-
-    void test_get_previous_bias()
-    {
-        NLR::NetworkLevelReasoner nlr;
-        populateNetwork( nlr );
-
-        // Generate query to create ReLU constraints
-        Query query;
-        nlr.generateQuery( query );
-
-        // Find ReLU constraints from the query
-        List<PiecewiseLinearConstraint *> constraints = query.getPiecewiseLinearConstraints();
-
-        for ( const auto &constraint : constraints )
-        {
-            ReluConstraint *relu = dynamic_cast<ReluConstraint *>( constraint );
-            TS_ASSERT( relu );
-
-            nlr.addConstraintInTopologicalOrder( relu );
-
-            // First ReLU layer (nodes 2,0 through 2,2) has previous bias 1
-            if ( relu->getB() == 3 || relu->getB() == 5 || relu->getB() == 7 )
-            {
-                TS_ASSERT_EQUALS( nlr.getPreviousBias( relu ), 1 );
-            }
-            // Second ReLU layer (nodes 4,0 and 4,1) has previous bias 2
-            else if ( relu->getB() == 9 || relu->getB() == 11 )
-            {
-                TS_ASSERT_EQUALS( nlr.getPreviousBias( relu ), 2 );
-            }
-        }
-    }
-
-    void test_get_previous_bias_error_handling()
-    {
-        NLR::NetworkLevelReasoner nlr;
-        populateNetwork( nlr );
-
-        // Generate invalid ReLU constraint
-        ReluConstraint invalidRelu( 15, 16 ); // Variables not in network
-
-        // Should throw since variables don't exist in network
-        TS_ASSERT_THROWS_EQUALS( nlr.getPreviousBias( &invalidRelu ),
-                                 const NLRError &e,
-                                 e.getCode(),
-                                 NLRError::RELU_NOT_FOUND );
-
-        // Test missing activation source using fresh network
-        NLR::NetworkLevelReasoner nlrNoActivations;
-        // Create minimal network without activation sources
-        nlrNoActivations.addLayer( 0, NLR::Layer::INPUT, 2 );
-        nlrNoActivations.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
-        nlrNoActivations.addLayer( 2, NLR::Layer::RELU, 2 );
-
-        ReluConstraint missingActivation( 2, 3 );
-
-        TS_ASSERT_THROWS_EQUALS( nlrNoActivations.getPreviousBias( &missingActivation ),
-                                 const NLRError &e,
-                                 e.getCode(),
-                                 NLRError::RELU_NOT_FOUND );
-    }
->>>>>>> upstream/master
 };

From 5bdeaa011ebba3fad4f73c224316cfb784e417ce Mon Sep 17 00:00:00 2001
From: ido-shm-uel
Date: Thu, 20 Feb 2025 14:06:31 +0200
Subject: [PATCH 62/81] Add files via upload

---
 CHANGELOG.md | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 99e50a0297..77be95ad3c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,11 +5,8 @@
 - Dropped support for parsing Tensorflow network format. Newest Marabou version that supports Tensorflow is at commit 190555573e4702.
 - Fixed bug in the parsing of `transpose` nodes in command line C++ parser.
 - Implemented forward-backward abstract interpretation, symbolic bound tightening, interval arithmetic and simulations for all activation functions.
-<<<<<<< HEAD
 - Implemented backward analysis using preimage-approximation algorithm for `Relu`, `LeakyRelu`, `Sign` and `Bilinear` Layers.
-=======
 - Added the BaBSR heuristic as a new branching strategy for ReLU Splitting
->>>>>>> upstream/master
 
 ## Version 2.0.0

From 021054f171f9a0fc718538b91df8aa940ea45cf3 Mon Sep 17 00:00:00 2001
From: ido-shm-uel
Date: Thu, 20 Feb 2025 14:07:46 +0200
Subject: [PATCH 63/81] Add files via upload

---
 src/configuration/OptionParser.cpp | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/src/configuration/OptionParser.cpp b/src/configuration/OptionParser.cpp
index 1aba3eff32..54bf7b496d 100644
--- a/src/configuration/OptionParser.cpp
+++ b/src/configuration/OptionParser.cpp
@@ -267,11 +267,8 @@ void OptionParser::initialize()
             &( ( *_stringOptions )[Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE] ) )
             ->default_value( ( *_stringOptions )[Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE] ),
         "The MILP solver bound tightening type: "
-<<<<<<< HEAD
-        "lp/backward-once/backward-converge/backward-preimage-approx/lp-inc/milp/milp-inc/iter-prop/none." )
-=======
-        "lp/backward-once/backward-converge/lp-inc/milp/milp-inc/iter-prop/none." )
->>>>>>> upstream/master
+        "lp/backward-once/backward-converge/backward-preimage-approx/lp-inc/milp/milp-inc/"
+        "iter-prop/none." )
 #endif
     ;

From d526637a222f855c0b883a519d6a42d7eebb76d2 Mon Sep 17 00:00:00 2001
From: ido-shm-uel
Date: Wed, 26 Feb 2025 20:45:33 +0200
Subject: [PATCH 64/81] Pass Vector &coeffs by ref, remove class
 PolygonalTightening

---
 src/engine/PolygonalTightening.h          | 101 ----------------
 src/nlr/LPFormulator.cpp                  |  29 +++----
 src/nlr/LPFormulator.h                    |  24 ++---
 src/nlr/Layer.cpp                         |  83 +++++++-----
 src/nlr/Layer.h                           |  22 ++---
 src/nlr/LayerOwner.h                      |   2 -
 src/nlr/NetworkLevelReasoner.cpp          |  23 +----
 src/nlr/NetworkLevelReasoner.h            |  17 +---
 src/nlr/tests/Test_LPRelaxation.h         |   1 -
 src/nlr/tests/Test_NetworkLevelReasoner.h |   1 -
 10 files changed, 72 insertions(+), 231 deletions(-)
 delete mode 100644 src/engine/PolygonalTightening.h

diff --git a/src/engine/PolygonalTightening.h b/src/engine/PolygonalTightening.h
deleted file mode 100644
index 99dabafa85..0000000000
--- a/src/engine/PolygonalTightening.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/********************* */
-/*! \file PolygonalTightening.h
- ** \verbatim
- ** Top contributors (to current version):
- **   Duligur Ibeling, Guy Katz, Ido Shmuel
- ** This file is part of the Marabou project.
- ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS
- ** in the top-level source directory) and their institutional affiliations.
- ** All rights reserved. See the file COPYING in the top-level source
- ** directory for licensing information.\endverbatim
- **
- ** [[ Add lengthier description here ]]
-
-**/
-
-#ifndef __PolygonalTightening_h__
-#define __PolygonalTightening_h__
-
-#include "Map.h"
-#include "NeuronIndex.h"
-
-#include <cstdio>
-
-class PolygonalTightening
-{
-public:
-    enum PolygonalBoundType {
-        LB = 0,
-        UB = 1,
-    };
-
-    PolygonalTightening( Map<NeuronIndex, double> neuronToCoefficient,
-                         double value,
-                         PolygonalBoundType type )
-        : _neuronToCoefficient( neuronToCoefficient )
-        , _value( value )
-        , _type( type )
-    {
-    }
-
-    /*
-      The coefficient of each neuron.
-    */
-    Map<NeuronIndex, double> _neuronToCoefficient;
-
-    /*
-      Its new value.
-    */
-    double _value;
-
-    /*
-      Whether the tightening tightens the
-      lower bound or the upper bound.
-    */
-    PolygonalBoundType _type;
-
-    /*
-      Equality operator.
-    */
-    bool operator==( const PolygonalTightening &other ) const
-    {
-        bool allFound = true;
-        for ( const auto &pair : _neuronToCoefficient )
-        {
-            bool currentFound = false;
-            for ( const auto &otherPair : other._neuronToCoefficient )
-            {
-                currentFound |= ( pair.first._layer == otherPair.first._layer &&
-                                  pair.first._neuron == otherPair.first._neuron &&
-                                  pair.second == otherPair.second );
-            }
-            allFound &= currentFound;
-        }
-        bool result = allFound && _value == other._value && _type == other._type &&
-                      _neuronToCoefficient.size() == other._neuronToCoefficient.size();
-        return result;
-    }
-
-    void dump() const
-    {
-        printf( "PolygonalTightening: x %s %.2lf\n", _type == LB ? ">=" : "<=", _value );
-
-        for ( const auto &pair : _neuronToCoefficient )
-        {
-            printf( "NeuronIndex: (layer %u, neuron %u), Coefficient: %.2lf )\n",
-                    pair.first._layer,
-                    pair.first._neuron,
-                    pair.second );
-        }
-    }
-};
-
-#endif // __PolygonalTightening_h__
-
-//
-// Local Variables:
-// compile-command: "make -C ../.. "
-// tags-file-name: "../../TAGS"
-// c-basic-offset: 4
-// End:
-// s
diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp
index c6a76ece4a..b4152769d2 100644
--- a/src/nlr/LPFormulator.cpp
+++ b/src/nlr/LPFormulator.cpp
@@ -250,7 +250,7 @@ void LPFormulator::optimizeBoundsWithIncrementalLpRelaxation( const Map<unsigned
 
 void LPFormulator::optimizeBoundsWithLpRelaxation( const Map<unsigned, Layer *> &layers,
                                                    bool backward,
-                                                   std::vector<double> coeffs )
+                                                   const Vector<double> &coeffs )
 {
     unsigned numberOfWorkers = Options::get()->getInt( Options::NUM_WORKERS );
 
@@ -339,7 +339,7 @@ void LPFormulator::optimizeBoundsWithLpRelaxation( const Map<unsigned, Layer *>
 
 void LPFormulator::optimizeBoundsWithPreimageApproximation( Map<unsigned, Layer *> &layers )
 {
-    std::vector<double> optimal_coeffs = layers[0]->OptimalParameterisedSymbolicBoundTightening();
+    const Vector<double> &optimal_coeffs = layers[0]->OptimalParameterisedSymbolicBoundTightening();
     optimizeBoundsWithLpRelaxation( layers, false, optimal_coeffs );
     optimizeBoundsWithLpRelaxation( layers, true, optimal_coeffs );
 }
@@ -419,7 +419,7 @@ void LPFormulator::optimizeBoundsOfOneLayerWithLpRelaxation( const Map<unsigned
 
 void LPFormulator::optimizeBoundsOfNeuronsWithLpRelaxation( ThreadArgument &args,
                                                             bool backward,
-                                                            std::vector<double> coeffs )
+                                                            const Vector<double> &coeffs )
 {
     unsigned numberOfWorkers = Options::get()->getInt( Options::NUM_WORKERS );
 
@@ -657,7 +657,7 @@ void LPFormulator::tightenSingleVariableBoundsWithLPRelaxation( ThreadArgument &
 
 void LPFormulator::createLPRelaxation( const Map<unsigned, Layer *> &layers,
                                        GurobiWrapper &gurobi,
                                        unsigned lastLayer,
-                                       std::vector<double> coeffs )
+                                       const Vector<double> &coeffs )
 {
     for ( const auto &layer : layers )
     {
@@ -669,9 +669,9 @@
             addLayerToModel( gurobi, layer.second, false );
         else
         {
-            Map<unsigned, std::vector<double>> layerIndicesToParameters =
+            Map<unsigned, Vector<double>> layerIndicesToParameters =
                 layer.second->getParametersForLayers( layers, coeffs );
-            std::vector<double> currentLayerCoeffs = layerIndicesToParameters[currentLayerIndex];
+            const Vector<double> &currentLayerCoeffs = layerIndicesToParameters[currentLayerIndex];
             addLayerToParameterisedModel( gurobi, layer.second, false, currentLayerCoeffs );
         }
     }
@@ -680,7 +680,7 @@
 void LPFormulator::createLPRelaxationAfter( const Map<unsigned, Layer *> &layers,
                                             GurobiWrapper &gurobi,
                                             unsigned firstLayer,
-                                            std::vector<double> coeffs )
+                                            const Vector<double> &coeffs )
 {
     unsigned depth = GlobalConfiguration::BACKWARD_BOUND_PROPAGATION_DEPTH;
     std::priority_queue<unsigned, std::vector<unsigned>, std::greater<unsigned>> layersToAdd;
@@ -702,9 +702,9 @@
                 addLayerToModel( gurobi, currentLayer, true );
             else
             {
-                Map<unsigned, std::vector<double>> layerIndicesToParameters =
+                Map<unsigned, Vector<double>> layerIndicesToParameters =
                     currentLayer->getParametersForLayers( layers, coeffs );
-                std::vector<double> currentLayerCoeffs =
+                const Vector<double> &currentLayerCoeffs =
                     layerIndicesToParameters[currentLayerIndex];
                 addLayerToParameterisedModel( gurobi, currentLayer, true, currentLayerCoeffs );
             }
@@ -1006,7 +1006,6 @@ void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi,
             gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub );
 
             // The phase of this AbsoluteValue is not yet fixed, 0 <= y <= max(-lb, ub).
-            // y >= 0
             List<GurobiWrapper::Term> terms;
             terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
@@ -1722,7 +1721,7 @@ void LPFormulator::addLeakyReluLayerToLpRelaxation( GurobiWrapper &gurobi,
 void LPFormulator::addLayerToParameterisedModel( GurobiWrapper &gurobi,
                                                  const Layer *layer,
                                                  bool createVariables,
-                                                 std::vector<double> coeffs )
+                                                 const Vector<double> &coeffs )
 {
     switch ( layer->getLayerType() )
     {
@@ -1751,7 +1750,7 @@ void LPFormulator::addLayerToParameterisedModel( GurobiWrapper &gurobi,
 void LPFormulator::addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi,
                                                             const Layer *layer,
                                                             bool createVariables,
-                                                            std::vector<double> coeffs )
+                                                            const Vector<double> &coeffs )
 {
     double coeff = coeffs[0];
     for ( unsigned i = 0; i < layer->getSize(); ++i )
@@ -1852,7 +1851,7 @@ void LPFormulator::addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurob
 void LPFormulator::addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi,
                                                             const Layer *layer,
                                                             bool createVariables,
-                                                            std::vector<double> coeffs )
+                                                            const Vector<double> &coeffs )
 {
     for ( unsigned i = 0; i < layer->getSize(); ++i )
     {
@@ -1939,7 +1938,7 @@ void LPFormulator::addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurob
 void LPFormulator::addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi,
                                                                  const Layer *layer,
                                                                  bool createVariables,
-                                                                 std::vector<double> coeffs )
+                                                                 const Vector<double> &coeffs )
 {
     double slope = layer->getAlpha();
     double coeff = coeffs[0];
@@ -2039,7 +2038,7 @@ void LPFormulator::addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper &
 void LPFormulator::addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi,
                                                                 const Layer *layer,
                                                                 bool createVariables,
-                                                                std::vector<double> coeffs )
+                                                                const Vector<double> &coeffs )
 {
     for ( unsigned i = 0; i < layer->getSize(); ++i )
     {
diff --git a/src/nlr/LPFormulator.h b/src/nlr/LPFormulator.h
index d89af4c331..ac9ecc4557 100644
--- a/src/nlr/LPFormulator.h
+++ b/src/nlr/LPFormulator.h
@@ -20,7 +20,6 @@
 #include "LayerOwner.h"
 #include "Map.h"
 #include "ParallelSolver.h"
-#include "PolygonalTightening.h"
 
 #include <climits>
 #include <mutex>
@@ -54,7 +53,7 @@ class LPFormulator : public ParallelSolver
     */
     void optimizeBoundsWithLpRelaxation( const Map<unsigned, Layer *> &layers,
                                          bool backward = false,
-                                         std::vector<double> coeffs = {} );
+                                         const Vector<double> &coeffs = Vector<double>( {} ) );
     void optimizeBoundsWithPreimageApproximation( Map<unsigned, Layer *> &layers );
     void optimizeBoundsOfOneLayerWithLpRelaxation( const Map<unsigned, Layer *> &layers,
                                                    unsigned targetIndex );
@@ -76,11 +75,11 @@ class LPFormulator : public ParallelSolver
     void createLPRelaxation( const Map<unsigned, Layer *> &layers,
                              GurobiWrapper &gurobi,
                              unsigned lastLayer = UINT_MAX,
-                             std::vector<double> coeffs = {} );
+                             const Vector<double> &coeffs = Vector<double>( {} ) );
     void createLPRelaxationAfter( const Map<unsigned, Layer *> &layers,
                                   GurobiWrapper &gurobi,
                                   unsigned firstLayer,
-                                  std::vector<double> coeffs = {} );
+                                  const Vector<double> &coeffs = Vector<double>( {} ) );
     double solveLPRelaxation( GurobiWrapper &gurobi,
                               const Map<unsigned, Layer *> &layers,
                               MinOrMax minOrMax,
@@ -132,36 +131,37 @@ class LPFormulator : public ParallelSolver
                                          const Layer *layer,
                                          bool createVariables );
 
-    void optimizeBoundsOfNeuronsWithLpRelaxation( ThreadArgument &args,
-                                                  bool backward,
-                                                  std::vector<double> coeffs = {} );
+    void
+    optimizeBoundsOfNeuronsWithLpRelaxation( ThreadArgument &args,
+                                             bool backward,
+                                             const Vector<double> &coeffs = Vector<double>( {} ) );
 
     // Create LP relaxations depending on external parameters.
     void addLayerToParameterisedModel( GurobiWrapper &gurobi,
                                        const Layer *layer,
                                        bool createVariables,
-                                       std::vector<double> coeffs );
+                                       const Vector<double> &coeffs );
 
     void addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi,
                                                   const Layer *layer,
                                                   bool createVariables,
-                                                  std::vector<double> coeffs );
+                                                  const Vector<double> &coeffs );
 
     void addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi,
                                                        const Layer *layer,
                                                        bool createVariables,
-                                                       std::vector<double> coeffs );
+                                                       const Vector<double> &coeffs );
 
     void addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi,
                                                   const Layer *layer,
                                                   bool createVariables,
-                                                  std::vector<double> coeffs );
+                                                  const Vector<double> &coeffs );
 
     void addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi,
                                                       const Layer *layer,
                                                       bool createVariables,
-                                                      std::vector<double> coeffs );
+                                                      const Vector<double> &coeffs );
 
     /*
diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp
index 2341c07596..46db84ae8a 100644
--- a/src/nlr/Layer.cpp
+++ b/src/nlr/Layer.cpp
@@ -3394,9 +3394,7 @@ void Layer::computeSymbolicBoundsForWeightedSum()
     }
 }
 
-void Layer::computeParameterisedSymbolicBounds( std::vector<double> coeffs,
-                                                bool receivePolygonal,
-                                                bool receive )
+void Layer::computeParameterisedSymbolicBounds( const Vector<double> &coeffs, bool receive )
 {
     switch ( _type )
     {
@@ -3420,30 +3418,9 @@ void Layer::computeParameterisedSymbolicBounds( std::vector<double> coeffs,
         computeSymbolicBounds();
         break;
     }
-
-    if ( receivePolygonal )
-    {
-        for ( unsigned i = 0; i < _size; ++i )
-        {
-            Map<NeuronIndex, double> lower_polygonal_tightening;
-            Map<NeuronIndex, double> upper_polygonal_tightening;
-
-            for ( unsigned j = 0; j < _inputLayerSize; ++j )
-            {
-                const NeuronIndex neuron( 0, j );
-                lower_polygonal_tightening.insert( neuron, _symbolicLb[j * _size + i] );
-                upper_polygonal_tightening.insert( neuron, _symbolicUb[j * _size + i] );
-            }
-
-            _layerOwner->receivePolygonalTighterBound( PolygonalTightening(
-                lower_polygonal_tightening, _symbolicLowerBias[i], PolygonalTightening::LB ) );
-            _layerOwner->receivePolygonalTighterBound( PolygonalTightening(
-                upper_polygonal_tightening, _symbolicUpperBias[i], PolygonalTightening::UB ) );
-        }
-    }
 }
 
-void Layer::computeParameterisedSymbolicBoundsForRelu( std::vector<double> coeffs, bool receive )
+void Layer::computeParameterisedSymbolicBoundsForRelu( const Vector<double> &coeffs, bool receive )
 {
     ASSERT( coeffs.size() == 1 );
 
@@ -3623,7 +3600,7 @@ void Layer::computeParameterisedSymbolicBoundsForRelu( std::vector<double> coeff
     }
 }
 
-void Layer::computeParameterisedSymbolicBoundsForSign( std::vector<double> coeffs, bool receive )
+void Layer::computeParameterisedSymbolicBoundsForSign( const Vector<double> &coeffs, bool receive )
 {
     ASSERT( coeffs.size() == 2 );
     ASSERT( coeffs[0] >= 0 && coeffs[0] <= 1 );
@@ -3840,7 +3817,7 @@ void Layer::computeParameterisedSymbolicBoundsForSign( std::vector<double> coeff
     }
 }
 
-void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( std::vector<double> coeffs,
+void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( const Vector<double> &coeffs,
                                                             bool receive )
 {
     ASSERT( _alpha > 0 && _alpha < 1 );
@@ -4084,7 +4061,7 @@ void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( std::vector<double>
     }
 }
 
-void Layer::computeParameterisedSymbolicBoundsForBilinear( std::vector<double> coeffs,
+void Layer::computeParameterisedSymbolicBoundsForBilinear( const Vector<double> &coeffs,
                                                            bool receive )
 {
     ASSERT( coeffs.size() == 2 );
@@ -4322,7 +4299,7 @@ void Layer::computeParameterisedSymbolicBoundsForBilinear( std::vector<double> c
     }
 }
 
-std::vector<double> Layer::OptimalParameterisedSymbolicBoundTightening()
+const Vector<double> Layer::OptimalParameterisedSymbolicBoundTightening()
 {
     const Map<unsigned, Layer *> &layers = _layerOwner->getLayerIndexToLayer();
 
@@ -4333,17 +4310,19 @@ std::vector<double> Layer::OptimalParameterisedSymbolicBoundTightening()
     double epsilon = GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS;
     double weight_decay = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_WEIGHT_DECAY;
     double lr = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE;
+    unsigned dimension = getNumberOfParameters( layers );
     bool maximize = false;
 
-    unsigned dimension = getNumberOfParameters( layers );
-    std::vector<double> lower_bounds;
-    std::vector<double> upper_bounds;
-    std::vector<double> guess;
-    lower_bounds.resize( dimension, 0 );
-    upper_bounds.resize( dimension, 1 );
-    guess.resize( dimension, 0 );
+    Vector<double> lower_bounds( dimension );
+    Vector<double> upper_bounds( dimension );
+    for ( size_t j = 0; j < dimension; ++j )
+    {
+        lower_bounds[j] = 0;
+        upper_bounds[j] = 1;
+    }
 
     // Initialize initial guess uniformly.
+    Vector<double> guess( dimension );
     std::mt19937_64 rng( GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED );
     std::uniform_real_distribution<double> dis( 0, 1 );
     for ( size_t j = 0; j < dimension; ++j )
@@ -4353,16 +4332,15 @@ std::vector<double> Layer::OptimalParameterisedSymbolicBoundTightening()
         guess[j] = lb + dis( rng ) * ( ub - lb );
     }
 
-    std::vector<std::vector<double>> candidates( dimension );
-    std::vector<double> gradient( dimension );
-    std::vector<double> current_minimum = guess;
+    Vector<Vector<double>> candidates( dimension );
+    Vector<double> gradient( dimension );
 
     for ( size_t i = 0; i < max_iterations; ++i )
    {
         double current_cost = EstimateVolume( guess );
         for ( size_t j = 0; j < dimension; ++j )
         {
-            candidates[j] = guess;
+            candidates[j] = Vector<double>( guess );
             candidates[j][j] += step_size;
 
             if ( candidates[j][j] > upper_bounds[j] || candidates[j][j] < lower_bounds[j] )
@@ -4405,19 +4383,20 @@ std::vector<double> Layer::OptimalParameterisedSymbolicBoundTightening()
         }
     }
 
-    return guess;
+    const Vector<double> optimal_coeffs( guess );
+    return optimal_coeffs;
 }
 
-double Layer::EstimateVolume( std::vector<double> coeffs )
+double Layer::EstimateVolume( const Vector<double> &coeffs )
 {
     // First, run parameterised symbolic bound propagation.
     const Map<unsigned, Layer *> &layers = _layerOwner->getLayerIndexToLayer();
-    Map<unsigned, std::vector<double>> layerIndicesToParameters =
+    Map<unsigned, Vector<double>> layerIndicesToParameters =
         getParametersForLayers( layers, coeffs );
     for ( unsigned i = 0; i < layers.size(); ++i )
     {
         ASSERT( layers.exists( i ) );
-        std::vector<double> currentLayerCoeffs = layerIndicesToParameters[i];
+        const Vector<double> &currentLayerCoeffs = layerIndicesToParameters[i];
         layers[i]->computeParameterisedSymbolicBounds( currentLayerCoeffs );
     }
 
@@ -4521,23 +4500,21 @@ unsigned Layer::getNumberOfParameters( const Map<unsigned, Layer *> &layers )
     return index;
 }
 
-Map<unsigned, std::vector<double>>
-Layer::getParametersForLayers( const Map<unsigned, Layer *> &layers, std::vector<double> coeffs )
+Map<unsigned, Vector<double>> Layer::getParametersForLayers( const Map<unsigned, Layer *> &layers,
+                                                             const Vector<double> &coeffs )
 {
     unsigned index = 0;
-    Map<unsigned, std::vector<double>> layerIndicesToParameters;
+    Map<unsigned, Vector<double>> layerIndicesToParameters;
     for ( auto pair : layers )
     {
         unsigned layerIndex = pair.first;
         Layer *layer = layers[layerIndex];
-        std::vector<double> parameters = {};
 
         switch ( layer->getLayerType() )
         {
         case Layer::RELU:
         case Layer::LEAKY_RELU: {
-            parameters = std::vector<double>( coeffs.begin() + index, coeffs.begin() + index + 1 );
-            layerIndicesToParameters.insert( layerIndex, parameters );
+            layerIndicesToParameters.insert( layerIndex, Vector<double>( { coeffs[index] } ) );
             index++;
         }
         break;
@@ -4545,14 +4522,14 @@ Layer::getParametersForLayers( const Map<unsigned, Layer *> &layers, std::vector
         case Layer::SIGN:
         case Layer::BILINEAR: {
-            parameters = std::vector<double>( coeffs.begin() + index, coeffs.begin() + index + 2 );
-            layerIndicesToParameters.insert( layerIndex, parameters );
+            layerIndicesToParameters.insert(
+                layerIndex, Vector<double>( { coeffs[index], coeffs[index + 1] } ) );
             index += 2;
         }
         break;
 
         default:
-            layerIndicesToParameters.insert( layerIndex, parameters );
+            layerIndicesToParameters.insert( layerIndex, Vector<double>( {} ) );
             break;
         }
     }
diff --git a/src/nlr/Layer.h b/src/nlr/Layer.h
index c61751ad74..a9899964c2 100644
--- a/src/nlr/Layer.h
+++ b/src/nlr/Layer.h
@@ -138,20 +138,18 @@ class Layer
     void obtainCurrentBounds();
     void computeIntervalArithmeticBounds();
     void computeSymbolicBounds();
-    void computeParameterisedSymbolicBounds( std::vector<double> coeffs,
-                                             bool receivePolygonal = false,
-                                             bool receive = false );
+    void computeParameterisedSymbolicBounds( const Vector<double> &coeffs, bool receive = false );
 
     // Get total number of optimizable parameters for parameterised SBT relaxation.
     unsigned getNumberOfParameters( const Map<unsigned, Layer *> &layers );
 
     // Get map containing vector of optimizable parameters for parameterised SBT relaxation for
     // every layer index.
-    Map<unsigned, std::vector<double>> getParametersForLayers( const Map<unsigned, Layer *> &layers,
-                                                               std::vector<double> coeffs );
+    Map<unsigned, Vector<double>> getParametersForLayers( const Map<unsigned, Layer *> &layers,
+                                                          const Vector<double> &coeffs );
 
     // Return optimizable parameters which minimize parameterised SBT bounds' volume.
- std::vector<double> OptimalParameterisedSymbolicBoundTightening(); + const Vector<double> OptimalParameterisedSymbolicBoundTightening(); /* Preprocessing functionality: variable elimination and reindexing @@ -310,13 +308,15 @@ class Layer /* Helper functions for parameterised symbolic bound tightening */ - void computeParameterisedSymbolicBoundsForRelu( std::vector<double> coeffs, bool receive ); - void computeParameterisedSymbolicBoundsForSign( std::vector<double> coeffs, bool receive ); - void computeParameterisedSymbolicBoundsForLeakyRelu( std::vector<double> coeffs, bool receive ); - void computeParameterisedSymbolicBoundsForBilinear( std::vector<double> coeffs, bool receive ); + void computeParameterisedSymbolicBoundsForRelu( const Vector<double> &coeffs, bool receive ); + void computeParameterisedSymbolicBoundsForSign( const Vector<double> &coeffs, bool receive ); + void computeParameterisedSymbolicBoundsForLeakyRelu( const Vector<double> &coeffs, + bool receive ); + void computeParameterisedSymbolicBoundsForBilinear( const Vector<double> &coeffs, + bool receive ); // Estimate Volume of parameterised symbolic bound tightening. - double EstimateVolume( std::vector<double> coeffs ); + double EstimateVolume( const Vector<double> &coeffs ); // Return difference between given point and upper and lower bounds determined by parameterised // SBT relaxation. diff --git a/src/nlr/LayerOwner.h b/src/nlr/LayerOwner.h index b8ae03cb0f..180a4fdebd 100644 --- a/src/nlr/LayerOwner.h +++ b/src/nlr/LayerOwner.h @@ -17,7 +17,6 @@ #define __LayerOwner_h__ #include "ITableau.h" -#include "PolygonalTightening.h" #include "Tightening.h" namespace NLR { @@ -36,7 +35,6 @@ class LayerOwner virtual const ITableau *getTableau() const = 0; virtual unsigned getNumberOfLayers() const = 0; virtual void receiveTighterBound( Tightening tightening ) = 0; - virtual void receivePolygonalTighterBound( PolygonalTightening polygonal_tightening ) = 0; }; } // namespace NLR diff --git a/src/nlr/NetworkLevelReasoner.cpp b/src/nlr/NetworkLevelReasoner.cpp index f80784f01c..ed07a1d2d5 100644 --- a/src/nlr/NetworkLevelReasoner.cpp +++ b/src/nlr/NetworkLevelReasoner.cpp @@ -198,36 +198,19 @@ void NetworkLevelReasoner::clearConstraintTightenings() _boundTightenings.clear(); } -void NetworkLevelReasoner::receivePolygonalTighterBound( PolygonalTightening polygonal_tightening ) -{ - _polygonalBoundTightenings.append( polygonal_tightening ); -} - -void NetworkLevelReasoner::getConstraintPolygonalTightenings( - List<PolygonalTightening> &polygonal_tightenings ) -{ - polygonal_tightenings = _polygonalBoundTightenings; - _polygonalBoundTightenings.clear(); -} - -void NetworkLevelReasoner::clearConstraintPolygonalTightenings() -{ - _polygonalBoundTightenings.clear(); -} - void NetworkLevelReasoner::symbolicBoundPropagation() { for ( unsigned i = 0; i < _layerIndexToLayer.size(); ++i ) _layerIndexToLayer[i]->computeSymbolicBounds(); } -void NetworkLevelReasoner::parameterisedSymbolicBoundPropagation( std::vector<double> coeffs ) +void NetworkLevelReasoner::parameterisedSymbolicBoundPropagation( const Vector<double> &coeffs ) { - Map<unsigned, std::vector<double>> layerIndicesToParameters = + Map<unsigned, Vector<double>> layerIndicesToParameters = _layerIndexToLayer[0]->getParametersForLayers( _layerIndexToLayer, coeffs ); for ( unsigned i = 0; i < _layerIndexToLayer.size(); ++i ) { - std::vector<double> currentLayerCoeffs = layerIndicesToParameters[i]; + const Vector<double> &currentLayerCoeffs = layerIndicesToParameters[i]; _layerIndexToLayer[i]->computeParameterisedSymbolicBounds( currentLayerCoeffs ); } } diff --git a/src/nlr/NetworkLevelReasoner.h b/src/nlr/NetworkLevelReasoner.h index a09b4a4712..03389789c4 100644 ---
a/src/nlr/NetworkLevelReasoner.h +++ b/src/nlr/NetworkLevelReasoner.h @@ -24,7 +24,6 @@ #include "MatrixMultiplication.h" #include "NeuronIndex.h" #include "PiecewiseLinearFunctionType.h" -#include "PolygonalTightening.h" #include "Tightening.h" #include "Vector.h" @@ -121,13 +120,6 @@ class NetworkLevelReasoner : public LayerOwner - getConstraintTightenings: this is the function that an external user calls in order to collect the tighter bounds discovered by the NLR. - - - receiveTighterPolygonalBound: this is a callback from the layer - objects, through which they report tighter polygonal bounds. - - - getConstraintPolygonalTightenings: this is the function that an - external user calls in order to collect the tighter polygonal bounds - discovered by the NLR. */ void setTableau( const ITableau *tableau ); @@ -137,7 +129,7 @@ class NetworkLevelReasoner : public LayerOwner void obtainCurrentBounds(); void intervalArithmeticBoundPropagation(); void symbolicBoundPropagation(); - void parameterisedSymbolicBoundPropagation( std::vector coeffs ); + void parameterisedSymbolicBoundPropagation( const Vector &coeffs ); void deepPolyPropagation(); void lpRelaxationPropagation(); void LPTighteningForOneLayer( unsigned targetIndex ); @@ -149,10 +141,6 @@ class NetworkLevelReasoner : public LayerOwner void getConstraintTightenings( List &tightenings ); void clearConstraintTightenings(); - void receivePolygonalTighterBound( PolygonalTightening polygonal_tightening ); - void getConstraintPolygonalTightenings( List &polygonal_tightenings ); - void clearConstraintPolygonalTightenings(); - /* For debugging purposes: dump the network topology */ @@ -223,9 +211,8 @@ class NetworkLevelReasoner : public LayerOwner Map _layerIndexToLayer; const ITableau *_tableau; - // Tightenings and Polyognal Tightenings discovered by the various layers + // Tightenings discovered by the various layers List _boundTightenings; - List _polygonalBoundTightenings; std::unique_ptr _deepPolyAnalysis; diff --git a/src/nlr/tests/Test_LPRelaxation.h b/src/nlr/tests/Test_LPRelaxation.h index 9b58650b2d..654d5c1af0 100644 --- a/src/nlr/tests/Test_LPRelaxation.h +++ b/src/nlr/tests/Test_LPRelaxation.h @@ -19,7 +19,6 @@ #include "Layer.h" #include "NetworkLevelReasoner.h" #include "Options.h" -#include "PolygonalTightening.h" #include "Query.h" #include "Tightening.h" #include "Vector.h" diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h index 7c46ae4317..f194bc3ed6 100644 --- a/src/nlr/tests/Test_NetworkLevelReasoner.h +++ b/src/nlr/tests/Test_NetworkLevelReasoner.h @@ -19,7 +19,6 @@ #include "Layer.h" #include "NetworkLevelReasoner.h" #include "Options.h" -#include "PolygonalTightening.h" #include "Query.h" #include "Tightening.h" #include "Vector.h" From 5cf4d8e6d1feb351c7bbf2ba3b93d64932083581 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 12 Jun 2025 19:44:29 +0300 Subject: [PATCH 65/81] Starting PMNR: Restore PolygonalTightening class, add backward-pmnr option, global constants, function skeletons and implement countNonlinearLayers, addPolyognalTighteningsToLpRelaxation. 
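For orientation: a polygonal tightening couples several neurons in one linear constraint, sum_i c_i * x_i >= v (type LB) or sum_i c_i * x_i <= v (type UB), rather than bounding a single variable the way a Tightening does. A minimal sketch of building one and of the LP row it induces (the neuron indices and the bound value 5 are made up for illustration; the class and method names are the ones this patch introduces):

    // "x_{2,0} + 2 * x_{2,1} <= 5" over neurons 0 and 1 of layer 2
    Map<NLR::NeuronIndex, double> coeffs;
    coeffs[NLR::NeuronIndex( 2, 0 )] = 1;
    coeffs[NLR::NeuronIndex( 2, 1 )] = 2;
    PolygonalTightening pt( coeffs, 5, PolygonalTightening::UB );

    // addPolyognalTighteningsToLpRelaxation then emits one Gurobi row per
    // tightening whose neurons all lie in [firstLayer, lastLayer]: a term
    // coeff * x_var for every live neuron ( addLeqConstraint for UB,
    // addGeqConstraint for LB ), while eliminated neurons are folded into
    // the constant as value -= coeff * eliminatedValue.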
--- src/configuration/GlobalConfiguration.cpp | 11 +- src/configuration/GlobalConfiguration.h | 22 +++ src/configuration/OptionParser.cpp | 3 +- src/configuration/Options.cpp | 2 + src/engine/Engine.cpp | 16 ++ src/engine/MILPSolverBoundTighteningType.h | 2 + src/engine/PolygonalTightening.h | 101 +++++++++++ src/nlr/LPFormulator.cpp | 113 ++++++++++-- src/nlr/LPFormulator.h | 30 +++- src/nlr/Layer.cpp | 191 ++++++++++++--------- src/nlr/Layer.h | 30 +++- src/nlr/LayerOwner.h | 2 + src/nlr/NetworkLevelReasoner.cpp | 20 +++ src/nlr/NetworkLevelReasoner.h | 16 +- 14 files changed, 450 insertions(+), 109 deletions(-) create mode 100644 src/engine/PolygonalTightening.h diff --git a/src/configuration/GlobalConfiguration.cpp b/src/configuration/GlobalConfiguration.cpp index 323983234f..0dc1fc42b8 100644 --- a/src/configuration/GlobalConfiguration.cpp +++ b/src/configuration/GlobalConfiguration.cpp @@ -67,14 +67,22 @@ const unsigned GlobalConfiguration::ROW_BOUND_TIGHTENER_SATURATION_ITERATIONS = const double GlobalConfiguration::COST_FUNCTION_ERROR_THRESHOLD = 0.0000000001; const unsigned GlobalConfiguration::SIMULATION_RANDOM_SEED = 1; + const unsigned GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED = 1; -const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED = 1; const unsigned GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS = 25000; +const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED = 1; const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS = 25; const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE = 0.025; const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE = 0.25; const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_WEIGHT_DECAY = 0; +const unsigned GlobalConfiguration::INVPROP_MAX_ITERATIONS = 25000; +const double GlobalConfiguration::INVPROP_STEP_SIZE = 0.0025; +const double GlobalConfiguration::INVPROP_LEARNING_RATE = 0.025; +const double GlobalConfiguration::INVPROP_WEIGHT_DECAY = 0; +const double GlobalConfiguration::INVPROP_INITIAL_ALPHA = 0.5; +const double GlobalConfiguration::INVPROP_INITIAL_GAMMA = 0.025; + const bool GlobalConfiguration::USE_HARRIS_RATIO_TEST = true; const double GlobalConfiguration::SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT = 0.00000000001; @@ -125,6 +133,7 @@ const bool GlobalConfiguration::WRITE_JSON_PROOF = false; const unsigned GlobalConfiguration::BACKWARD_BOUND_PROPAGATION_DEPTH = 3; const unsigned GlobalConfiguration::MAX_ROUNDS_OF_BACKWARD_ANALYSIS = 10; +const unsigned GlobalConfiguration::MAX_ROUNDS_OF_PMNR_BACKWARD_ANALYSIS = 10; #ifdef ENABLE_GUROBI const unsigned GlobalConfiguration::GUROBI_NUMBER_OF_THREADS = 1; diff --git a/src/configuration/GlobalConfiguration.h b/src/configuration/GlobalConfiguration.h index ad8baf68c1..3191c82b33 100644 --- a/src/configuration/GlobalConfiguration.h +++ b/src/configuration/GlobalConfiguration.h @@ -172,6 +172,24 @@ class GlobalConfiguration // Weight decay for PreimageApproximation optimization. static const double PREIMAGE_APPROXIMATION_OPTIMIZATION_WEIGHT_DECAY; + // Maximum iterations for INVPROP optimization. + static const unsigned INVPROP_MAX_ITERATIONS; + + // Step size for INVPROP optimization. + static const double INVPROP_STEP_SIZE; + + // Learning rate for INVPROP optimization. + static const double INVPROP_LEARNING_RATE; + + // Weight decay for INVPROP optimization. 
+ static const double INVPROP_WEIGHT_DECAY; + + // Initial alpha values for INVPROP optimization. + static const double INVPROP_INITIAL_ALPHA; + + // Initial gamma values for INVPROP optimization. + static const double INVPROP_INITIAL_GAMMA; + // How often should projected steepest edge reset the reference space? static const unsigned PSE_ITERATIONS_BEFORE_RESET; @@ -284,6 +302,10 @@ class GlobalConfiguration */ static const unsigned MAX_ROUNDS_OF_BACKWARD_ANALYSIS; + /* How many rounds of backward analysis to perform for PMNR algorithm? + */ + static const unsigned MAX_ROUNDS_OF_PMNR_BACKWARD_ANALYSIS; + #ifdef ENABLE_GUROBI /* The number of threads Gurobi spawns diff --git a/src/configuration/OptionParser.cpp b/src/configuration/OptionParser.cpp index 54bf7b496d..249862a930 100644 --- a/src/configuration/OptionParser.cpp +++ b/src/configuration/OptionParser.cpp @@ -267,7 +267,8 @@ void OptionParser::initialize() &( ( *_stringOptions )[Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE] ) ) ->default_value( ( *_stringOptions )[Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE] ), "The MILP solver bound tightening type: " - "lp/backward-once/backward-converge/backward-preimage-approx/lp-inc/milp/milp-inc/" + "lp/backward-once/backward-converge/backward-preimage-approx/backward-pmnr/lp-inc/milp/" + "milp-inc/" "iter-prop/none." ) #endif ; diff --git a/src/configuration/Options.cpp b/src/configuration/Options.cpp index 0ddd143c13..d9fa7b0b3b 100644 --- a/src/configuration/Options.cpp +++ b/src/configuration/Options.cpp @@ -211,6 +211,8 @@ MILPSolverBoundTighteningType Options::getMILPSolverBoundTighteningType() const return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE; if ( strategyString == "backward-preimage-approx" ) return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX; + if ( strategyString == "backward-pmnr" ) + return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR; else if ( strategyString == "milp" ) return MILPSolverBoundTighteningType::MILP_ENCODING; else if ( strategyString == "milp-inc" ) diff --git a/src/engine/Engine.cpp b/src/engine/Engine.cpp index a3d0d82aeb..0c5163d660 100644 --- a/src/engine/Engine.cpp +++ b/src/engine/Engine.cpp @@ -1595,6 +1595,7 @@ void Engine::performMILPSolverBoundedTightening( Query *inputQuery ) case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR: _networkLevelReasoner->lpRelaxationPropagation(); break; case MILPSolverBoundTighteningType::MILP_ENCODING: @@ -1668,6 +1669,20 @@ void Engine::performAdditionalBackwardAnalysisIfNeeded() if ( _verbosity > 0 ) printf( "Backward analysis tightened %u bounds\n", tightened ); } + + if ( _milpSolverBoundTighteningType == MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR ) + { + unsigned tightened = performSymbolicBoundTightening( &( *_preprocessedQuery ) ); + if ( _verbosity > 0 ) + printf( "Backward analysis tightened %u bounds\n", tightened ); + while ( tightened && GlobalConfiguration::MAX_ROUNDS_OF_PMNR_BACKWARD_ANALYSIS ) + { + performMILPSolverBoundedTightening( &( *_preprocessedQuery ) ); + tightened = performSymbolicBoundTightening( &( *_preprocessedQuery ) ); + if ( _verbosity > 0 ) + printf( "Backward analysis tightened %u bounds\n", tightened ); + } + } } void Engine::performMILPSolverBoundedTighteningForSingleLayer( unsigned targetIndex ) @@ -1696,6 +1711,7 @@ void 
Engine::performMILPSolverBoundedTighteningForSingleLayer( unsigned targetIn case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR: case MILPSolverBoundTighteningType::ITERATIVE_PROPAGATION: case MILPSolverBoundTighteningType::NONE: return; diff --git a/src/engine/MILPSolverBoundTighteningType.h b/src/engine/MILPSolverBoundTighteningType.h index 517acf73e9..ee4539fdc3 100644 --- a/src/engine/MILPSolverBoundTighteningType.h +++ b/src/engine/MILPSolverBoundTighteningType.h @@ -37,6 +37,8 @@ enum class MILPSolverBoundTighteningType { // Perform backward analysis using the PreimageApproximation Algorithm (arXiv:2305.03686v4 // [cs.SE]) BACKWARD_ANALYSIS_PREIMAGE_APPROX = 7, + // Perform backward analysis using the PMNR Algorithm + BACKWARD_ANALYSIS_PMNR = 8, // Option to have no MILP bound tightening performed NONE = 10, }; diff --git a/src/engine/PolygonalTightening.h b/src/engine/PolygonalTightening.h new file mode 100644 index 0000000000..e007f1bbb2 --- /dev/null +++ b/src/engine/PolygonalTightening.h @@ -0,0 +1,101 @@ +/********************* */ +/*! \file PolygonalTightening.h + ** \verbatim + ** Top contributors (to current version): + ** Duligur Ibeling, Guy Katz, Ido Shmuel + ** This file is part of the Marabou project. + ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS + ** in the top-level source directory) and their institutional affiliations. + ** All rights reserved. See the file COPYING in the top-level source + ** directory for licensing information.\endverbatim + ** + ** [[ Add lengthier description here ]] + **/ + +#ifndef __PolygonalTightening_h__ +#define __PolygonalTightening_h__ + +#include "Map.h" +#include "NeuronIndex.h" + +#include <cstdio> + +class PolygonalTightening +{ +public: + enum PolygonalBoundType { + LB = 0, + UB = 1, + }; + + PolygonalTightening( Map<NLR::NeuronIndex, double> neuronToCoefficient, + double value, + PolygonalBoundType type ) + : _neuronToCoefficient( neuronToCoefficient ) + , _value( value ) + , _type( type ) + { + } + + /* + The coefficient of each neuron. + */ + Map<NLR::NeuronIndex, double> _neuronToCoefficient; + + /* + Its new value. + */ + double _value; + + /* + Whether the tightening tightens the + lower bound or the upper bound. + */ + PolygonalBoundType _type; + + /* + Equality operator. + */ + bool operator==( const PolygonalTightening &other ) const + { + bool allFound = true; + for ( const auto &pair : _neuronToCoefficient ) + { + bool currentFound = false; + for ( const auto &otherPair : other._neuronToCoefficient ) + { + currentFound |= ( pair.first._layer == otherPair.first._layer && + pair.first._neuron == otherPair.first._neuron && + pair.second == otherPair.second ); + } + allFound &= currentFound; + } + bool result = allFound && _value == other._value && _type == other._type && + _neuronToCoefficient.size() == other._neuronToCoefficient.size(); + return result; + } + + /* + Get matching coefficient for a NeuronIndex, + return 0 if the NeuronIndex is not present. + */ + double getCoeff( NLR::NeuronIndex index ) const + { + if ( _neuronToCoefficient.exists( index ) ) + return _neuronToCoefficient[index]; + return 0; + } + + void dump() const + { + printf( "PolygonalTightening: x %s %.2lf\n", _type == LB ?
">=" : "<=", _value ); + for ( const auto &pair : _neuronToCoefficient ) + { + printf( "NeuronIndex: ( layer %u, neuron %u ), Coefficient: %.2lf\n", + pair.first._layer, + pair.first._neuron, + pair.second ); + } + } +}; +#endif // __PolygonalTightening_h__ diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp index b4152769d2..4363ecba2f 100644 --- a/src/nlr/LPFormulator.cpp +++ b/src/nlr/LPFormulator.cpp @@ -248,9 +248,11 @@ void LPFormulator::optimizeBoundsWithIncrementalLpRelaxation( const Map<unsigned, -void LPFormulator::optimizeBoundsWithLpRelaxation( const Map<unsigned, Layer *> &layers, - bool backward, - const Vector<double> &coeffs ) +void LPFormulator::optimizeBoundsWithLpRelaxation( + const Map<unsigned, Layer *> &layers, + bool backward, + const Vector<double> &coeffs, + const Vector<PolygonalTightening> &polygonal_tightenings ) { unsigned numberOfWorkers = Options::get()->getInt( Options::NUM_WORKERS ); @@ -303,7 +305,8 @@ void LPFormulator::optimizeBoundsWithLpRelaxation( const Map &solverToIndex ); // optimize every neuron of layer - optimizeBoundsOfNeuronsWithLpRelaxation( argument, backward, coeffs ); + optimizeBoundsOfNeuronsWithLpRelaxation( + argument, backward, coeffs, polygonal_tightenings ); LPFormulator_LOG( Stringf( "Tightening bound for layer %u - done", layerIndex ).ascii() ); } @@ -344,6 +347,15 @@ void LPFormulator::optimizeBoundsWithPreimageApproximation( Map<unsigned, +void LPFormulator::optimizeBoundsWithPMNR( Map<unsigned, Layer *> &layers ) +{ + const Vector<PolygonalTightening> &polygonal_tightenings = + layers[0]->OptimizeParameterisedPolygonalTightening(); + const Vector<double> &optimal_coeffs = layers[0]->OptimalParameterisedSymbolicBoundTightening(); + optimizeBoundsWithLpRelaxation( layers, false, optimal_coeffs, polygonal_tightenings ); + optimizeBoundsWithLpRelaxation( layers, true, optimal_coeffs, polygonal_tightenings ); +} + void LPFormulator::optimizeBoundsOfOneLayerWithLpRelaxation( const Map<unsigned, Layer *> &layers, unsigned targetIndex ) { @@ -417,9 +429,11 @@ void LPFormulator::optimizeBoundsOfOneLayerWithLpRelaxation( const Map<unsigned, -void LPFormulator::optimizeBoundsOfNeuronsWithLpRelaxation( ThreadArgument &args, - bool backward, - const Vector<double> &coeffs ) +void LPFormulator::optimizeBoundsOfNeuronsWithLpRelaxation( + ThreadArgument &args, + bool backward, + const Vector<double> &coeffs, + const Vector<PolygonalTightening> &polygonal_tightenings ) { unsigned numberOfWorkers = Options::get()->getInt( Options::NUM_WORKERS ); @@ -532,9 +546,11 @@ void LPFormulator::optimizeBoundsOfNeuronsWithLpRelaxation( ThreadArgument &args mtx.lock(); if ( backward ) - createLPRelaxationAfter( layers, *freeSolver, lastIndexOfRelaxation, coeffs ); + createLPRelaxationAfter( + layers, *freeSolver, lastIndexOfRelaxation, coeffs, polygonal_tightenings ); else - createLPRelaxation( layers, *freeSolver, lastIndexOfRelaxation, coeffs ); + createLPRelaxation( + layers, *freeSolver, lastIndexOfRelaxation, coeffs, polygonal_tightenings ); mtx.unlock(); // spawn a thread to tighten the bounds for the current variable @@ -657,7 +673,8 @@ void LPFormulator::tightenSingleVariableBoundsWithLPRelaxation( ThreadArgument & void LPFormulator::createLPRelaxation( const Map<unsigned, Layer *> &layers, GurobiWrapper &gurobi, unsigned lastLayer, - const Vector<double> &coeffs ) + const Vector<double> &coeffs, + const Vector<PolygonalTightening> &polygonal_tightenings ) { for ( const auto &layer : layers ) { @@ -675,12 +692,15 @@ void LPFormulator::createLPRelaxation( const Map<unsigned, Layer *> &layers, addLayerToParameterisedModel( gurobi, layer.second, false, currentLayerCoeffs ); } } + addPolyognalTighteningsToLpRelaxation( gurobi, layers, 0, lastLayer, polygonal_tightenings ); } -void LPFormulator::createLPRelaxationAfter( const Map<unsigned, Layer *> &layers, - GurobiWrapper &gurobi, - unsigned firstLayer, - const Vector<double> &coeffs ) +void LPFormulator::createLPRelaxationAfter( + const Map<unsigned, Layer *> &layers, + GurobiWrapper &gurobi, + unsigned firstLayer, + const Vector<double>
&coeffs, + const Vector<PolygonalTightening> &polygonal_tightenings ) { unsigned depth = GlobalConfiguration::BACKWARD_BOUND_PROPAGATION_DEPTH; std::priority_queue<unsigned, std::vector<unsigned>, std::greater<unsigned>> layersToAdd; @@ -718,6 +738,8 @@ void LPFormulator::createLPRelaxationAfter( const Map<unsigned, Layer *> &layers } } } + addPolyognalTighteningsToLpRelaxation( + gurobi, layers, firstLayer, layersToAdd.top(), polygonal_tightenings ); } void LPFormulator::addLayerToModel( GurobiWrapper &gurobi, @@ -2143,6 +2165,69 @@ void LPFormulator::addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &g } } +void LPFormulator::addPolyognalTighteningsToLpRelaxation( + GurobiWrapper &gurobi, + const Map<unsigned, Layer *> &layers, + unsigned firstLayer, + unsigned lastLayer, + const Vector<PolygonalTightening> &polygonal_tightenings ) +{ + for ( const auto &tightening : polygonal_tightenings ) + { + bool out_of_bounds = false; + Map<NLR::NeuronIndex, double> neuronToCoefficient = tightening._neuronToCoefficient; + PolygonalTightening::PolygonalBoundType type = tightening._type; + double value = tightening._value; + List<GurobiWrapper::Term> terms; + + for ( const auto &pair : neuronToCoefficient ) + { + unsigned currentLayerIndex = pair.first._layer; + if ( currentLayerIndex < firstLayer || currentLayerIndex > lastLayer ) + { + out_of_bounds = true; + } + } + + if ( out_of_bounds ) + { + continue; + } + + for ( const auto &pair : neuronToCoefficient ) + { + unsigned currentLayerIndex = pair.first._layer; + unsigned i = pair.first._neuron; + double coeff = pair.second; + Layer *layer = layers[currentLayerIndex]; + + if ( !layer->neuronEliminated( i ) ) + { + unsigned variable = layer->neuronToVariable( i ); + String variableName = Stringf( "x%u", variable ); + if ( !gurobi.containsVariable( variableName ) ) + { + gurobi.addVariable( variableName, layer->getLb( i ), layer->getUb( i ) ); + } + terms.append( GurobiWrapper::Term( coeff, Stringf( "x%u", variable ) ) ); + } + else + { + value -= coeff * layer->getEliminatedNeuronValue( i ); + } + } + + if ( type == PolygonalTightening::UB ) + { + gurobi.addLeqConstraint( terms, value ); + } + else + { + gurobi.addGeqConstraint( terms, value ); + } + } +} + void LPFormulator::setCutoff( double cutoff ) { _cutoffInUse = true; diff --git a/src/nlr/LPFormulator.h b/src/nlr/LPFormulator.h index ac9ecc4557..cd67766b15 100644 --- a/src/nlr/LPFormulator.h +++ b/src/nlr/LPFormulator.h @@ -20,6 +20,7 @@ #include "LayerOwner.h" #include "Map.h" #include "ParallelSolver.h" +#include "PolygonalTightening.h" #include <climits> #include <mutex> @@ -53,8 +54,11 @@ class LPFormulator : public ParallelSolver */ void optimizeBoundsWithLpRelaxation( const Map<unsigned, Layer *> &layers, bool backward = false, - const Vector<double> &coeffs = Vector<double>( {} ) ); + const Vector<double> &coeffs = Vector<double>( {} ), + const Vector<PolygonalTightening> &polygonal_tightenings = + Vector<PolygonalTightening>( {} ) ); void optimizeBoundsWithPreimageApproximation( Map<unsigned, Layer *> &layers ); + void optimizeBoundsWithPMNR( Map<unsigned, Layer *> &layers ); void optimizeBoundsOfOneLayerWithLpRelaxation( const Map<unsigned, Layer *> &layers, unsigned targetIndex ); void optimizeBoundsWithIncrementalLpRelaxation( const Map<unsigned, Layer *> &layers ); @@ -75,11 +79,15 @@ class LPFormulator : public ParallelSolver void createLPRelaxation( const Map<unsigned, Layer *> &layers, GurobiWrapper &gurobi, unsigned lastLayer = UINT_MAX, - const Vector<double> &coeffs = Vector<double>( {} ) ); + const Vector<double> &coeffs = Vector<double>( {} ), + const Vector<PolygonalTightening> &polygonal_tightenings = + Vector<PolygonalTightening>( {} ) ); void createLPRelaxationAfter( const Map<unsigned, Layer *> &layers, GurobiWrapper &gurobi, unsigned firstLayer, - const Vector<double> &coeffs = Vector<double>( {} ) ); + const Vector<double> &coeffs = Vector<double>( {} ), + const Vector<PolygonalTightening> &polygonal_tightenings = + Vector<PolygonalTightening>( {} ) ); double solveLPRelaxation(
GurobiWrapper &gurobi, const Map &layers, MinOrMax minOrMax, @@ -131,10 +139,12 @@ class LPFormulator : public ParallelSolver const Layer *layer, bool createVariables ); - void - optimizeBoundsOfNeuronsWithLpRelaxation( ThreadArgument &args, - bool backward, - const Vector &coeffs = Vector( {} ) ); + void optimizeBoundsOfNeuronsWithLpRelaxation( + ThreadArgument &args, + bool backward, + const Vector &coeffs = Vector( {} ), + const Vector &polygonal_tightenings = + Vector( {} ) ); // Create LP relaxations depending on external parameters. @@ -163,6 +173,12 @@ class LPFormulator : public ParallelSolver bool createVariables, const Vector &coeffs ); + void addPolyognalTighteningsToLpRelaxation( + GurobiWrapper &gurobi, + const Map &layers, + unsigned firstLayer, + unsigned lastLayer, + const Vector &polygonal_tightenings ); /* Optimize for the min/max value of variableName with respect to the constraints diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index 46db84ae8a..303fcd53dc 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -77,7 +77,9 @@ void Layer::allocateMemory() if ( Options::get()->getSymbolicBoundTighteningType() == SymbolicBoundTighteningType::SYMBOLIC_BOUND_TIGHTENING || Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX ) + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX || + Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR ) { _symbolicLb = new double[_size * _inputLayerSize]; _symbolicUb = new double[_size * _inputLayerSize]; @@ -3580,22 +3582,21 @@ void Layer::computeParameterisedSymbolicBoundsForRelu( const Vector &coe variable. If they are tigheter than what was previously known, store them. */ - if ( _lb[i] < _symbolicLbOfLb[i] ) + if ( receive ) { - _lb[i] = _symbolicLbOfLb[i]; - - if ( receive ) + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } - - if ( _ub[i] > _symbolicUbOfUb[i] ) - { - _ub[i] = _symbolicUbOfUb[i]; + } - if ( receive ) + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } } } } @@ -3797,22 +3798,21 @@ void Layer::computeParameterisedSymbolicBoundsForSign( const Vector &coe variable. If they are tigheter than what was previously known, store them. */ - if ( _lb[i] < _symbolicLbOfLb[i] ) + if ( receive ) { - _lb[i] = _symbolicLbOfLb[i]; - - if ( receive ) + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } - - if ( _ub[i] > _symbolicUbOfUb[i] ) - { - _ub[i] = _symbolicUbOfUb[i]; + } - if ( receive ) + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } } } } @@ -4041,22 +4041,21 @@ void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( const Vector variable. If they are tigheter than what was previously known, store them. 
*/ - if ( _lb[i] < _symbolicLbOfLb[i] ) + if ( receive ) { - _lb[i] = _symbolicLbOfLb[i]; - - if ( receive ) + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } - - if ( _ub[i] > _symbolicUbOfUb[i] ) - { - _ub[i] = _symbolicUbOfUb[i]; + } - if ( receive ) + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } } } } @@ -4279,22 +4278,21 @@ void Layer::computeParameterisedSymbolicBoundsForBilinear( const Vector variable. If they are tigheter than what was previously known, store them. */ - if ( _lb[i] < _symbolicLbOfLb[i] ) + if ( receive ) { - _lb[i] = _symbolicLbOfLb[i]; - - if ( receive ) + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } - - if ( _ub[i] > _symbolicUbOfUb[i] ) - { - _ub[i] = _symbolicUbOfUb[i]; + } - if ( receive ) + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } } } } @@ -4387,6 +4385,19 @@ const Vector Layer::OptimalParameterisedSymbolicBoundTightening() return optimal_coeffs; } + +const Vector Layer::OptimizeParameterisedPolygonalTightening() +{ + // ... +} + +const Vector +Layer::OptimizeSingleParameterisedPolygonalTightening( PolygonalTightening tightening, + Vector prevTightenings ) +{ + // ... +} + double Layer::EstimateVolume( const Vector &coeffs ) { // First, run parameterised symbolic bound propagation. @@ -4419,7 +4430,7 @@ double Layer::EstimateVolume( const Vector &coeffs ) double ub = inputLayer->getUb( index ); if ( lb == ub ) - return 0; + continue; log_box_volume += std::log( ub - lb ); } @@ -4474,34 +4485,53 @@ double Layer::calculateDifferenceFromSymbolic( Map &point, uns return std::max( _ub[i] - upperSum, lowerSum - _lb[i] ); } -unsigned Layer::getNumberOfParameters( const Map &layers ) +double Layer::getParameterisdPolygonalTighteningLowerBound( + const Vector &coeffs, + const Vector &gamma, + PolygonalTightening tightening, + Vector prevTightenings ) const { - unsigned index = 0; + // ... +} + + +const List Layer::selectConstraints( const Map &layers ) const +{ + // ... +} + +const Vector +Layer::generatePolygonalTightenings( const Map &layers ) const +{ + // ... 
+} + + +unsigned Layer::getNumberOfParametersPerType( Type t ) const +{ + if ( t == Layer::RELU || t == Layer::LEAKY_RELU ) + return 1; + + if ( t == Layer::SIGN || t == Layer::BILINEAR ) + return 2; + + return 0; +} + +unsigned Layer::getNumberOfParameters( const Map &layers ) const +{ + unsigned num = 0; for ( auto pair : layers ) { unsigned layerIndex = pair.first; Layer *layer = layers[layerIndex]; - switch ( layer->getLayerType() ) - { - case Layer::RELU: - case Layer::LEAKY_RELU: - index++; - break; - - case Layer::SIGN: - case Layer::BILINEAR: - index += 2; - break; - - default: - break; - } + num += getNumberOfParametersPerType( layer->getLayerType() ); } - return index; + return num; } Map> Layer::getParametersForLayers( const Map &layers, - const Vector &coeffs ) + const Vector &coeffs ) const { unsigned index = 0; Map> layerIndicesToParameters; @@ -4509,33 +4539,30 @@ Map> Layer::getParametersForLayers( const MapgetLayerType() ) - { - case Layer::RELU: - case Layer::LEAKY_RELU: - { - layerIndicesToParameters.insert( layerIndex, Vector( { coeffs[index] } ) ); - index++; - } - break; - - case Layer::SIGN: - case Layer::BILINEAR: + unsigned n_coeffs = getNumberOfParametersPerType( layer->getLayerType() ); + Vector current_coeffs( n_coeffs ); + for ( unsigned i = 0; i < n_coeffs; i++ ) { - layerIndicesToParameters.insert( - layerIndex, Vector( { coeffs[index], coeffs[index + 1] } ) ); - index += 2; - } - break; - - default: - layerIndicesToParameters.insert( layerIndex, Vector( {} ) ); - break; + current_coeffs[i] = coeffs[index + i]; } + layerIndicesToParameters.insert( layerIndex, current_coeffs ); + index += n_coeffs; } return layerIndicesToParameters; } +unsigned Layer::countNonlinearLayers( const Map &layers ) const +{ + unsigned num = 0; + for ( auto pair : layers ) + { + unsigned layerIndex = pair.first; + Layer *layer = layers[layerIndex]; + num += layer->getLayerType() != Layer::WEIGHTED_SUM ? 1 : 0; + } + return num; +} + double Layer::softmaxLSELowerBound( const Vector &inputs, const Vector &inputLbs, const Vector &inputUbs, diff --git a/src/nlr/Layer.h b/src/nlr/Layer.h index a9899964c2..8a0fe325c4 100644 --- a/src/nlr/Layer.h +++ b/src/nlr/Layer.h @@ -140,17 +140,36 @@ class Layer void computeSymbolicBounds(); void computeParameterisedSymbolicBounds( const Vector &coeffs, bool receive = false ); + + // Get number of optimizable parameters for parameterised SBT relaxation per layer type. + unsigned getNumberOfParametersPerType( Type t ) const; + // Get total number of optimizable parameters for parameterised SBT relaxation. - unsigned getNumberOfParameters( const Map &layers ); + unsigned getNumberOfParameters( const Map &layers ) const; + + // Get total number of non-weighted sum layers for INVPROP. + unsigned countNonlinearLayers( const Map &layers ) const; // Get map containing vector of optimizable parameters for parameterised SBT relaxation for // every layer index. Map> getParametersForLayers( const Map &layers, - const Vector &coeffs ); + const Vector &coeffs ) const; // Return optimizable parameters which minimize parameterised SBT bounds' volume. const Vector OptimalParameterisedSymbolicBoundTightening(); + // Heuristically select neurons and polygonal tightenings for PMNR. + const List selectConstraints( const Map &layers ) const; + + const Vector + generatePolygonalTightenings( const Map &layers ) const; + + // Optimize biases of generated parameterised polygonal tightenings. 
+ const Vector<PolygonalTightening> OptimizeParameterisedPolygonalTightening(); + const Vector + OptimizeSingleParameterisedPolygonalTightening( PolygonalTightening tightening, + Vector<PolygonalTightening> prevTightenings ); + /* Preprocessing functionality: variable elimination and reindexing */ @@ -322,6 +341,13 @@ class Layer // SBT relaxation. double calculateDifferenceFromSymbolic( Map<unsigned, double> &point, unsigned i ) const; + // Get current lower bound for selected parameterised polygonal tightenings' biases. + double getParameterisdPolygonalTighteningLowerBound( + const Vector<double> &coeffs, + const Vector<double> &gamma, + PolygonalTightening tightening, + Vector<PolygonalTightening> prevTightenings ) const; + /* Helper functions for interval bound tightening */ diff --git a/src/nlr/LayerOwner.h b/src/nlr/LayerOwner.h index 180a4fdebd..b8ae03cb0f 100644 --- a/src/nlr/LayerOwner.h +++ b/src/nlr/LayerOwner.h @@ -17,6 +17,7 @@ #define __LayerOwner_h__ #include "ITableau.h" +#include "PolygonalTightening.h" #include "Tightening.h" namespace NLR { @@ -35,6 +36,7 @@ class LayerOwner virtual const ITableau *getTableau() const = 0; virtual unsigned getNumberOfLayers() const = 0; virtual void receiveTighterBound( Tightening tightening ) = 0; + virtual void receivePolygonalTighterBound( PolygonalTightening polygonal_tightening ) = 0; }; } // namespace NLR diff --git a/src/nlr/NetworkLevelReasoner.cpp b/src/nlr/NetworkLevelReasoner.cpp index ed07a1d2d5..94b8c57833 100644 --- a/src/nlr/NetworkLevelReasoner.cpp +++ b/src/nlr/NetworkLevelReasoner.cpp @@ -198,6 +198,23 @@ void NetworkLevelReasoner::clearConstraintTightenings() _boundTightenings.clear(); } +void NetworkLevelReasoner::receivePolygonalTighterBound( PolygonalTightening polygonal_tightening ) +{ + _polygonalBoundTightenings.append( polygonal_tightening ); +} + +void NetworkLevelReasoner::getConstraintPolygonalTightenings( + List<PolygonalTightening> &polygonal_tightenings ) +{ + polygonal_tightenings = _polygonalBoundTightenings; + _polygonalBoundTightenings.clear(); +} + +void NetworkLevelReasoner::clearConstraintPolygonalTightenings() +{ + _polygonalBoundTightenings.clear(); +} + void NetworkLevelReasoner::symbolicBoundPropagation() { for ( unsigned i = 0; i < _layerIndexToLayer.size(); ++i ) @@ -235,6 +252,9 @@ void NetworkLevelReasoner::lpRelaxationPropagation() else if ( Options::get()->getMILPSolverBoundTighteningType() == MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX ) lpFormulator.optimizeBoundsWithPreimageApproximation( _layerIndexToLayer ); + else if ( Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR ) + lpFormulator.optimizeBoundsWithPMNR( _layerIndexToLayer ); else if ( Options::get()->getMILPSolverBoundTighteningType() == MILPSolverBoundTighteningType::LP_RELAXATION ) lpFormulator.optimizeBoundsWithLpRelaxation( _layerIndexToLayer ); diff --git a/src/nlr/NetworkLevelReasoner.h b/src/nlr/NetworkLevelReasoner.h index 03389789c4..0e62a9b027 100644 --- a/src/nlr/NetworkLevelReasoner.h +++ b/src/nlr/NetworkLevelReasoner.h @@ -24,6 +24,7 @@ #include "MatrixMultiplication.h" #include "NeuronIndex.h" #include "PiecewiseLinearFunctionType.h" +#include "PolygonalTightening.h" #include "Tightening.h" #include "Vector.h" @@ -120,6 +121,13 @@ class NetworkLevelReasoner : public LayerOwner - getConstraintTightenings: this is the function that an external user calls in order to collect the tighter bounds discovered by the NLR. + + - receivePolygonalTighterBound: this is a callback from the layer + objects, through which they report tighter polygonal bounds.
+ + - getConstraintPolygonalTightenings: this is the function that an + external user calls in order to collect the tighter polygonal bounds + discovered by the NLR. */ void setTableau( const ITableau *tableau ); @@ -141,6 +149,10 @@ class NetworkLevelReasoner : public LayerOwner void getConstraintTightenings( List<Tightening> &tightenings ); void clearConstraintTightenings(); + void receivePolygonalTighterBound( PolygonalTightening polygonal_tightening ); + void getConstraintPolygonalTightenings( List<PolygonalTightening> &polygonal_tightenings ); + void clearConstraintPolygonalTightenings(); + /* For debugging purposes: dump the network topology */ @@ -211,9 +223,9 @@ class NetworkLevelReasoner : public LayerOwner Map<unsigned, Layer *> _layerIndexToLayer; const ITableau *_tableau; - // Tightenings discovered by the various layers + // Tightenings and Polygonal Tightenings discovered by the various layers List<Tightening> _boundTightenings; - + List<PolygonalTightening> _polygonalBoundTightenings; std::unique_ptr<DeepPolyAnalysis> _deepPolyAnalysis; From cd522f7b5ab7b4c8406f92c926db14b670cf0d21 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 17 Jul 2025 20:27:37 +0300 Subject: [PATCH 66/81] Current progress in PMNR: 1. Implement initializeOutputLayerSymbolicBounds using the DeepPoly module. 2. Implement OptimizeSingleParameterisedPolygonalTightening and most of selectConstraints, getParameterisdPolygonalTighteningLowerBound. 3. Move some bound propagation functions and helpers from Layer and LPFormulator to NetworkLevelReasoner. 4. Delete the duplicate helpers from DeepPolySoftmaxElement and redirect their call sites to the copies in Layer. --- src/configuration/GlobalConfiguration.cpp | 4 + src/configuration/GlobalConfiguration.h | 9 + src/configuration/OptionParser.cpp | 5 +- src/configuration/Options.cpp | 10 +- src/engine/Engine.cpp | 19 +- src/engine/MILPEncoder.cpp | 23 +- src/engine/MILPSolverBoundTighteningType.h | 14 +- src/nlr/DeepPolyAnalysis.cpp | 25 +- src/nlr/DeepPolyAnalysis.h | 13 +- src/nlr/DeepPolyElement.cpp | 99 +- src/nlr/DeepPolyElement.h | 25 +- src/nlr/DeepPolySoftmaxElement.cpp | 344 +----- src/nlr/DeepPolySoftmaxElement.h | 55 - src/nlr/DeepPolyWeightedSumElement.cpp | 27 + src/nlr/LPFormulator.cpp | 100 +- src/nlr/LPFormulator.h | 22 +- src/nlr/Layer.cpp | 409 ++------ src/nlr/Layer.h | 168 +-- src/nlr/NetworkLevelReasoner.cpp | 1099 +++++++++++++++++++- src/nlr/NetworkLevelReasoner.h | 71 ++ src/nlr/tests/Test_DeepPolyAnalysis.h | 27 +- src/nlr/tests/Test_LPRelaxation.h | 109 +- src/nlr/tests/Test_NetworkLevelReasoner.h | 28 +- 23 files changed, 1719 insertions(+), 986 deletions(-) diff --git a/src/configuration/GlobalConfiguration.cpp b/src/configuration/GlobalConfiguration.cpp index 0dc1fc42b8..e5623a826c 100644 --- a/src/configuration/GlobalConfiguration.cpp +++ b/src/configuration/GlobalConfiguration.cpp @@ -83,6 +83,10 @@ const double GlobalConfiguration::INVPROP_WEIGHT_DECAY = 0; const double GlobalConfiguration::INVPROP_INITIAL_ALPHA = 0.5; const double GlobalConfiguration::INVPROP_INITIAL_GAMMA = 0.025; +const unsigned GlobalConfiguration::PMNR_RANDOM_SEED = 1; +const unsigned GlobalConfiguration::PMNR_MAX_ITERATIONS = 100; +const unsigned GlobalConfiguration::PMNR_SELECTED_NEURONS = 3; + const bool GlobalConfiguration::USE_HARRIS_RATIO_TEST = true; const double GlobalConfiguration::SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT = 0.00000000001; diff --git a/src/configuration/GlobalConfiguration.h b/src/configuration/GlobalConfiguration.h index 3191c82b33..3efd2cae60 100644 --- a/src/configuration/GlobalConfiguration.h +++ b/src/configuration/GlobalConfiguration.h @@ -190,6
+190,15 @@ class GlobalConfiguration // Initial gamma values for INVPROP optimization. static const double INVPROP_INITIAL_GAMMA; + // Random seed for PMNR (with randomized hyperplanes). + static const unsigned PMNR_RANDOM_SEED; + + // Maximum iterations for PMNR optimization. + static const unsigned PMNR_MAX_ITERATIONS; + + // Number of selected neurons for PMNR (with heuristically selected hyperplanes). + static const unsigned PMNR_SELECTED_NEURONS; + // How often should projected steepest edge reset the reference space? static const unsigned PSE_ITERATIONS_BEFORE_RESET; diff --git a/src/configuration/OptionParser.cpp b/src/configuration/OptionParser.cpp index 249862a930..cd1c182fce 100644 --- a/src/configuration/OptionParser.cpp +++ b/src/configuration/OptionParser.cpp @@ -267,8 +267,9 @@ void OptionParser::initialize() &( ( *_stringOptions )[Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE] ) ) ->default_value( ( *_stringOptions )[Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE] ), "The MILP solver bound tightening type: " - "lp/backward-once/backward-converge/backward-preimage-approx/backward-pmnr/lp-inc/milp/" - "milp-inc/" + "lp/backward-once/backward-converge/backward-preimage-approx/backward-invprop/" + "backward-pmnr-random/" + "backward-pmnr-gradient/backward-pmnr-bbps/lp-inc/milp/milp-inc/" "iter-prop/none." ) #endif ; diff --git a/src/configuration/Options.cpp b/src/configuration/Options.cpp index d9fa7b0b3b..8e9a4b5f87 100644 --- a/src/configuration/Options.cpp +++ b/src/configuration/Options.cpp @@ -211,8 +211,14 @@ MILPSolverBoundTighteningType Options::getMILPSolverBoundTighteningType() const return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE; if ( strategyString == "backward-preimage-approx" ) return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX; - if ( strategyString == "backward-pmnr" ) - return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR; + if ( strategyString == "backward-invprop" ) + return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP; + if ( strategyString == "backward-pmnr-random" ) + return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM; + if ( strategyString == "backward-pmnr-gradient" ) + return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT; + if ( strategyString == "backward-pmnr-bbps" ) + return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS; else if ( strategyString == "milp" ) return MILPSolverBoundTighteningType::MILP_ENCODING; else if ( strategyString == "milp-inc" ) diff --git a/src/engine/Engine.cpp b/src/engine/Engine.cpp index 0c5163d660..1802cd2d2a 100644 --- a/src/engine/Engine.cpp +++ b/src/engine/Engine.cpp @@ -1595,7 +1595,10 @@ void Engine::performMILPSolverBoundedTightening( Query *inputQuery ) case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX: - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS: _networkLevelReasoner->lpRelaxationPropagation(); break; case MILPSolverBoundTighteningType::MILP_ENCODING: @@ -1670,7 +1673,14 @@ void Engine::performAdditionalBackwardAnalysisIfNeeded() printf( "Backward analysis tightened %u bounds\n",
tightened ); } - if ( _milpSolverBoundTighteningType == MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR ) + if ( _milpSolverBoundTighteningType == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP || + _milpSolverBoundTighteningType == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM || + _milpSolverBoundTighteningType == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT || + _milpSolverBoundTighteningType == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS ) { unsigned tightened = performSymbolicBoundTightening( &( *_preprocessedQuery ) ); if ( _verbosity > 0 ) @@ -1711,7 +1721,10 @@ void Engine::performMILPSolverBoundedTighteningForSingleLayer( unsigned targetIn case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX: - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS: case MILPSolverBoundTighteningType::ITERATIVE_PROPAGATION: case MILPSolverBoundTighteningType::NONE: return; diff --git a/src/engine/MILPEncoder.cpp b/src/engine/MILPEncoder.cpp index 2041ff57b1..2e4bf59947 100644 --- a/src/engine/MILPEncoder.cpp +++ b/src/engine/MILPEncoder.cpp @@ -16,9 +16,9 @@ #include "MILPEncoder.h" -#include "DeepPolySoftmaxElement.h" #include "FloatUtils.h" #include "GurobiWrapper.h" +#include "Layer.h" #include "TimeUtils.h" MILPEncoder::MILPEncoder( const ITableau &tableau ) @@ -629,14 +629,14 @@ void MILPEncoder::encodeSoftmaxConstraint( GurobiWrapper &gurobi, SoftmaxConstra } if ( !useLSE2 ) { - symbolicLowerBias = NLR::DeepPolySoftmaxElement::LSELowerBound( - sourceMids, sourceLbs, sourceUbs, i ); + symbolicLowerBias = + NLR::Layer::LSELowerBound( sourceMids, sourceLbs, sourceUbs, i ); if ( !FloatUtils::wellFormed( symbolicLowerBias ) ) wellFormed = false; for ( unsigned j = 0; j < size; ++j ) { - double dldj = NLR::DeepPolySoftmaxElement::dLSELowerBound( - sourceMids, sourceLbs, sourceUbs, i, j ); + double dldj = + NLR::Layer::dLSELowerBound( sourceMids, sourceLbs, sourceUbs, i, j ); if ( !FloatUtils::wellFormed( dldj ) ) wellFormed = false; terms.append( @@ -646,14 +646,14 @@ void MILPEncoder::encodeSoftmaxConstraint( GurobiWrapper &gurobi, SoftmaxConstra } else { - symbolicLowerBias = NLR::DeepPolySoftmaxElement::LSELowerBound2( - sourceMids, sourceLbs, sourceUbs, i ); + symbolicLowerBias = + NLR::Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, i ); if ( !FloatUtils::wellFormed( symbolicLowerBias ) ) wellFormed = false; for ( unsigned j = 0; j < size; ++j ) { - double dldj = NLR::DeepPolySoftmaxElement::dLSELowerBound2( - sourceMids, sourceLbs, sourceUbs, i, j ); + double dldj = + NLR::Layer::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, i, j ); if ( !FloatUtils::wellFormed( dldj ) ) wellFormed = false; terms.append( @@ -667,15 +667,14 @@ void MILPEncoder::encodeSoftmaxConstraint( GurobiWrapper &gurobi, SoftmaxConstra // Upper-bound wellFormed = true; double symbolicUpperBias = - NLR::DeepPolySoftmaxElement::LSEUpperBound( sourceMids, targetLbs, targetUbs, i ); + NLR::Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, i ); if ( !FloatUtils::wellFormed( symbolicUpperBias ) ) wellFormed = false; 
terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariables[i] ) ) ); for ( unsigned j = 0; j < size; ++j ) { - double dudj = NLR::DeepPolySoftmaxElement::dLSEUpperbound( - sourceMids, targetLbs, targetUbs, i, j ); + double dudj = NLR::Layer::dLSEUpperbound( sourceMids, targetLbs, targetUbs, i, j ); if ( !FloatUtils::wellFormed( dudj ) ) wellFormed = false; terms.append( GurobiWrapper::Term( -dudj, Stringf( "x%u", sourceVariables[j] ) ) ); diff --git a/src/engine/MILPSolverBoundTighteningType.h b/src/engine/MILPSolverBoundTighteningType.h index ee4539fdc3..a723455be6 100644 --- a/src/engine/MILPSolverBoundTighteningType.h +++ b/src/engine/MILPSolverBoundTighteningType.h @@ -37,10 +37,18 @@ enum class MILPSolverBoundTighteningType { // Perform backward analysis using the PreimageApproximation Algorithm (arXiv:2305.03686v4 // [cs.SE]) BACKWARD_ANALYSIS_PREIMAGE_APPROX = 7, - // Perform backward analysis using the PMNR Algorithm - BACKWARD_ANALYSIS_PMNR = 8, + // Perform backward analysis using INVPROP (arXiv:2302.01404v4 [cs.LG]) + BACKWARD_ANALYSIS_INVPROP = 8, + // Perform backward analysis using PMNR with random neuron selection. + BACKWARD_ANALYSIS_PMNR_RANDOM = 9, + // Perform backward analysis using PMNR with maximum gradient neuron selection (arXiv:1804.10829 + // [cs.AI]). + BACKWARD_ANALYSIS_PMNR_GRADIENT = 10, + // Perform backward analysis using PMNR with BBPS-based neuron selection (arXiv:2405.21063v3 + // [cs.LG]). + BACKWARD_ANALYSIS_PMNR_BBPS = 11, // Option to have no MILP bound tightening performed - NONE = 10, + NONE = 20, }; #endif // __MILPSolverBoundTighteningType_h__ diff --git a/src/nlr/DeepPolyAnalysis.cpp b/src/nlr/DeepPolyAnalysis.cpp index 6a7ddb09db..d97c23edba 100644 --- a/src/nlr/DeepPolyAnalysis.cpp +++ b/src/nlr/DeepPolyAnalysis.cpp @@ -39,14 +39,24 @@ namespace NLR { -DeepPolyAnalysis::DeepPolyAnalysis( LayerOwner *layerOwner ) +DeepPolyAnalysis::DeepPolyAnalysis( LayerOwner *layerOwner, + bool storeSymbolicBounds, + Map *outputLayerSymbolicLb, + Map *outputLayerSymbolicUb, + Map *outputLayerSymbolicLowerBias, + Map *outputLayerSymbolicUpperBias ) : _layerOwner( layerOwner ) + , _storeSymbolicBounds( storeSymbolicBounds ) , _work1SymbolicLb( NULL ) , _work1SymbolicUb( NULL ) , _work2SymbolicLb( NULL ) , _work2SymbolicUb( NULL ) , _workSymbolicLowerBias( NULL ) , _workSymbolicUpperBias( NULL ) + , _outputLayerSymbolicLb( outputLayerSymbolicLb ) + , _outputLayerSymbolicUb( outputLayerSymbolicUb ) + , _outputLayerSymbolicLowerBias( outputLayerSymbolicLowerBias ) + , _outputLayerSymbolicUpperBias( outputLayerSymbolicUpperBias ) { const Map &layers = _layerOwner->getLayerIndexToLayer(); // Get the maximal layer size @@ -148,6 +158,8 @@ void DeepPolyAnalysis::run() { if ( layer->neuronEliminated( j ) ) continue; + if ( _storeSymbolicBounds && index == _layerOwner->getNumberOfLayers() - 1 ) + continue; double lb = deepPolyElement->getLowerBound( j ); if ( layer->getLb( j ) < lb ) { @@ -157,6 +169,7 @@ void DeepPolyAnalysis::run() layer->getLb( j ), lb ) ); layer->setLb( j, lb ); + _layerOwner->receiveTighterBound( Tightening( layer->neuronToVariable( j ), lb, Tightening::LB ) ); } @@ -169,6 +182,7 @@ void DeepPolyAnalysis::run() layer->getUb( j ), ub ) ); layer->setUb( j, ub ); + _layerOwner->receiveTighterBound( Tightening( layer->neuronToVariable( j ), ub, Tightening::UB ) ); } @@ -235,6 +249,15 @@ DeepPolyElement *DeepPolyAnalysis::createDeepPolyElement( Layer *layer ) else throw NLRError( NLRError::LAYER_TYPE_NOT_SUPPORTED, Stringf( 
"Layer %u not yet supported", layer->getLayerType() ).ascii() ); + + if ( _storeSymbolicBounds && layer->getLayerIndex() == _layerOwner->getNumberOfLayers() - 1 ) + { + deepPolyElement->setStoreSymbolicBounds( true ); + } + deepPolyElement->setOutputLayerSymbolicBoundsMemory( _outputLayerSymbolicLb, + _outputLayerSymbolicUb, + _outputLayerSymbolicLowerBias, + _outputLayerSymbolicUpperBias ); return deepPolyElement; } diff --git a/src/nlr/DeepPolyAnalysis.h b/src/nlr/DeepPolyAnalysis.h index ddadd1c84d..e2066874b6 100644 --- a/src/nlr/DeepPolyAnalysis.h +++ b/src/nlr/DeepPolyAnalysis.h @@ -28,13 +28,19 @@ namespace NLR { class DeepPolyAnalysis { public: - DeepPolyAnalysis( LayerOwner *layerOwner ); + DeepPolyAnalysis( LayerOwner *layerOwner, + bool storeSymbolicBounds = false, + Map *outputLayerSymbolicLb = NULL, + Map *outputLayerSymbolicUb = NULL, + Map *outputLayerSymbolicLowerBias = NULL, + Map *outputLayerSymbolicUpperBias = NULL ); ~DeepPolyAnalysis(); void run(); private: LayerOwner *_layerOwner; + bool _storeSymbolicBounds; /* Maps layer index to the abstract element @@ -51,6 +57,11 @@ class DeepPolyAnalysis double *_workSymbolicLowerBias; double *_workSymbolicUpperBias; + Map *_outputLayerSymbolicLb; + Map *_outputLayerSymbolicUb; + Map *_outputLayerSymbolicLowerBias; + Map *_outputLayerSymbolicUpperBias; + unsigned _maxLayerSize; void allocateMemory(); diff --git a/src/nlr/DeepPolyElement.cpp b/src/nlr/DeepPolyElement.cpp index d95e62ceb6..86afb43afd 100644 --- a/src/nlr/DeepPolyElement.cpp +++ b/src/nlr/DeepPolyElement.cpp @@ -17,10 +17,11 @@ namespace NLR { -DeepPolyElement::DeepPolyElement() +DeepPolyElement::DeepPolyElement( bool storeSymbolicBounds ) : _layer( NULL ) , _size( 0 ) , _layerIndex( 0 ) + , _storeSymbolicBounds( storeSymbolicBounds ) , _symbolicLb( NULL ) , _symbolicUb( NULL ) , _symbolicLowerBias( NULL ) @@ -92,6 +93,11 @@ double DeepPolyElement::getUpperBound( unsigned index ) const return _ub[index]; } +void DeepPolyElement::setStoreSymbolicBounds( bool storeSymbolicBounds ) +{ + _storeSymbolicBounds = storeSymbolicBounds; +} + double DeepPolyElement::getLowerBoundFromLayer( unsigned index ) const { ASSERT( index < getSize() ); @@ -156,4 +162,95 @@ void DeepPolyElement::setWorkingMemory( double *work1SymbolicLb, _workSymbolicUpperBias = workSymbolicUpperBias; } +void DeepPolyElement::setOutputLayerSymbolicBoundsMemory( + Map *outputLayerSymbolicLb, + Map *outputLayerSymbolicUb, + Map *outputLayerSymbolicLowerBias, + Map *outputLayerSymbolicUpperBias ) +{ + _outputLayerSymbolicLb = outputLayerSymbolicLb; + _outputLayerSymbolicUb = outputLayerSymbolicUb; + _outputLayerSymbolicLowerBias = outputLayerSymbolicLowerBias; + _outputLayerSymbolicUpperBias = outputLayerSymbolicUpperBias; +} + +void DeepPolyElement::storeWorkSymbolicBounds( + unsigned sourceLayerSize, + double *work1SymbolicLb, + double *work1SymbolicUb, + double *workSymbolicLowerBias, + double *workSymbolicUpperBias, + Map &residualLb, + Map &residualUb, + Set &residualLayerIndices, + const Map &deepPolyElementsBefore ) +{ + double *workSymbolicLowerBias2 = new double[_size]; + double *workSymbolicUpperBias2 = new double[_size]; + memcpy( workSymbolicLowerBias2, workSymbolicLowerBias, _size * sizeof( double ) ); + memcpy( workSymbolicUpperBias2, workSymbolicUpperBias, _size * sizeof( double ) ); + + for ( const auto &residualLayerIndex : residualLayerIndices ) + { + DeepPolyElement *residualElement = deepPolyElementsBefore[residualLayerIndex]; + double *currentResidualLb = 
residualLb[residualLayerIndex]; + double *currentResidualUb = residualUb[residualLayerIndex]; + + // Get concrete bounds + for ( unsigned i = 0; i < residualElement->getSize(); ++i ) + { + double sourceLb = residualElement->getLowerBoundFromLayer( i ) - + GlobalConfiguration::SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT; + double sourceUb = residualElement->getUpperBoundFromLayer( i ) + + GlobalConfiguration::SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT; + + for ( unsigned j = 0; j < _size; ++j ) + { + // Compute lower bound + double weight = currentResidualLb[i * _size + j]; + if ( weight >= 0 ) + { + workSymbolicLowerBias2[j] += ( weight * sourceLb ); + } + else + { + workSymbolicLowerBias2[j] += ( weight * sourceUb ); + } + + // Compute upper bound + weight = currentResidualUb[i * _size + j]; + if ( weight >= 0 ) + { + workSymbolicUpperBias2[j] += ( weight * sourceUb ); + } + else + { + workSymbolicUpperBias2[j] += ( weight * sourceLb ); + } + } + } + } + + double *currentSymbolicLb = ( *_outputLayerSymbolicLb )[_layerIndex]; + double *currentSymbolicUb = ( *_outputLayerSymbolicUb )[_layerIndex]; + double *currentSymbolicLowerBias = ( *_outputLayerSymbolicLowerBias )[_layerIndex]; + double *currentSymbolicUpperBias = ( *_outputLayerSymbolicUpperBias )[_layerIndex]; + memcpy( currentSymbolicLb, work1SymbolicLb, _size * sourceLayerSize * sizeof( double ) ); + memcpy( currentSymbolicUb, work1SymbolicUb, _size * sourceLayerSize * sizeof( double ) ); + memcpy( currentSymbolicLowerBias, workSymbolicLowerBias2, _size * sizeof( double ) ); + memcpy( currentSymbolicUpperBias, workSymbolicUpperBias2, _size * sizeof( double ) ); + + if ( workSymbolicLowerBias2 ) + { + delete[] workSymbolicLowerBias2; + workSymbolicLowerBias2 = NULL; + } + + if ( workSymbolicUpperBias2 ) + { + delete[] workSymbolicUpperBias2; + workSymbolicUpperBias2 = NULL; + } +} + } // namespace NLR diff --git a/src/nlr/DeepPolyElement.h b/src/nlr/DeepPolyElement.h index ee86d62eca..b4be631490 100644 --- a/src/nlr/DeepPolyElement.h +++ b/src/nlr/DeepPolyElement.h @@ -28,7 +28,7 @@ namespace NLR { class DeepPolyElement { public: - DeepPolyElement(); + DeepPolyElement( bool storeSymbolicBounds = false ); virtual ~DeepPolyElement(){}; // execute the abstract layer based on the abstract layers topologically @@ -68,6 +68,7 @@ class DeepPolyElement double *getSymbolicUpperBias() const; double getLowerBound( unsigned index ) const; double getUpperBound( unsigned index ) const; + void setStoreSymbolicBounds( bool storeSymbolicBounds ); void setWorkingMemory( double *work1SymbolicLb, double *work1SymbolicUb, @@ -76,6 +77,22 @@ class DeepPolyElement double *workSymbolicLowerBias, double *workSymbolicUpperBias ); + void + setOutputLayerSymbolicBoundsMemory( Map *outputLayerSymbolicLb, + Map *outputLayerSymbolicUb, + Map *outputLayerSymbolicLowerBias, + Map *outputLayerSymbolicUpperBias ); + + void storeWorkSymbolicBounds( unsigned sourceLayerSize, + double *work1SymbolicLb, + double *work1SymbolicUb, + double *workSymbolicLowerBias, + double *workSymbolicUpperBias, + Map &residualLb, + Map &residualUb, + Set &residualLayerIndices, + const Map &deepPolyElementsBefore ); + double getLowerBoundFromLayer( unsigned index ) const; double getUpperBoundFromLayer( unsigned index ) const; @@ -83,6 +100,7 @@ class DeepPolyElement Layer *_layer; unsigned _size; unsigned _layerIndex; + bool _storeSymbolicBounds; /* Abstract element described in @@ -103,6 +121,11 @@ class DeepPolyElement double *_workSymbolicLowerBias; double *_workSymbolicUpperBias; + Map 
*_outputLayerSymbolicLb; + Map *_outputLayerSymbolicUb; + Map *_outputLayerSymbolicLowerBias; + Map *_outputLayerSymbolicUpperBias; + void allocateMemory(); void freeMemoryIfNeeded(); diff --git a/src/nlr/DeepPolySoftmaxElement.cpp b/src/nlr/DeepPolySoftmaxElement.cpp index 2b6acb9c1c..574d3970b7 100644 --- a/src/nlr/DeepPolySoftmaxElement.cpp +++ b/src/nlr/DeepPolySoftmaxElement.cpp @@ -89,8 +89,8 @@ void DeepPolySoftmaxElement::execute( } } - double lb = linearLowerBound( sourceLbs, sourceUbs, index ); - double ub = linearUpperBound( sourceLbs, sourceUbs, index ); + double lb = Layer::linearLowerBound( sourceLbs, sourceUbs, index ); + double ub = Layer::linearUpperBound( sourceLbs, sourceUbs, index ); if ( lb > _lb[i] ) _lb[i] = lb; if ( ub < _ub[i] ) @@ -124,11 +124,11 @@ void DeepPolySoftmaxElement::execute( if ( !useLSE2 ) { _symbolicLowerBias[i] = - LSELowerBound( sourceMids, sourceLbs, sourceUbs, index ); + Layer::LSELowerBound( sourceMids, sourceLbs, sourceUbs, index ); for ( const auto &sourceIndex : sources ) { - double dldj = - dLSELowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + double dldj = Layer::dLSELowerBound( + sourceMids, sourceLbs, sourceUbs, index, inputIndex ); _symbolicLb[_size * sourceIndex._neuron + i] = dldj; _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; ++inputIndex; @@ -137,23 +137,24 @@ void DeepPolySoftmaxElement::execute( else { _symbolicLowerBias[i] = - LSELowerBound2( sourceMids, sourceLbs, sourceUbs, index ); + Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, index ); for ( const auto &sourceIndex : sources ) { - double dldj = - dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + double dldj = Layer::dLSELowerBound2( + sourceMids, sourceLbs, sourceUbs, index, inputIndex ); _symbolicLb[_size * sourceIndex._neuron + i] = dldj; _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; ++inputIndex; } } - _symbolicUpperBias[i] = LSEUpperBound( sourceMids, targetLbs, targetUbs, index ); + _symbolicUpperBias[i] = + Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, index ); inputIndex = 0; for ( const auto &sourceIndex : sources ) { - double dudj = - dLSEUpperbound( sourceMids, targetLbs, targetUbs, index, inputIndex ); + double dudj = Layer::dLSEUpperbound( + sourceMids, targetLbs, targetUbs, index, inputIndex ); _symbolicUb[_size * sourceIndex._neuron + i] = dudj; _symbolicUpperBias[i] -= dudj * sourceMids[inputIndex]; ++inputIndex; @@ -161,23 +162,25 @@ void DeepPolySoftmaxElement::execute( } else if ( _boundType == SoftmaxBoundType::EXPONENTIAL_RECIPROCAL_DECOMPOSITION ) { - _symbolicLowerBias[i] = ERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); + _symbolicLowerBias[i] = + Layer::ERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); unsigned inputIndex = 0; for ( const auto &sourceIndex : sources ) { double dldj = - dERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + Layer::dERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); _symbolicLb[_size * sourceIndex._neuron + i] = dldj; _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; ++inputIndex; } - _symbolicUpperBias[i] = ERUpperBound( sourceMids, targetLbs, targetUbs, index ); + _symbolicUpperBias[i] = + Layer::ERUpperBound( sourceMids, targetLbs, targetUbs, index ); inputIndex = 0; for ( const auto &sourceIndex : sources ) { double dudj = - dERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex ); + Layer::dERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex ); 
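// The two statements below complete the tangent-plane relaxation for this
// output: the bound has the form  f(x0) + sum_j (df/dx_j)(x0) * ( x_j - x0_j )
// with x0 = sourceMids, so the partial derivative dudj is stored as the
// symbolic coefficient of source neuron j, and dudj * sourceMids[inputIndex]
// is subtracted from the bias, leaving  f(x0) - sum_j dudj * x0_j  as the
// constant term.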
_symbolicUb[_size * sourceIndex._neuron + i] = dudj; _symbolicUpperBias[i] -= dudj * sourceMids[inputIndex]; ++inputIndex; @@ -334,317 +337,6 @@ void DeepPolySoftmaxElement::freeMemoryIfNeeded() } } -double DeepPolySoftmaxElement::LSELowerBound( const Vector &inputs, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) -{ - double sum = 0; - for ( unsigned j = 0; j < inputs.size(); ++j ) - { - double lj = inputLbs[j]; - double uj = inputUbs[j]; - double xj = inputs[j]; - sum += - ( uj - xj ) / ( uj - lj ) * std::exp( lj ) + ( xj - lj ) / ( uj - lj ) * std::exp( uj ); - } - - return std::exp( inputs[i] ) / sum; -} - -double DeepPolySoftmaxElement::dLSELowerBound( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) -{ - double val = 0; - if ( i == di ) - val += LSELowerBound( inputMids, inputLbs, inputUbs, i ); - - double ldi = inputLbs[di]; - double udi = inputUbs[di]; - - double sum = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) - { - double lj = inputLbs[j]; - double uj = inputUbs[j]; - double xj = inputMids[j]; - - sum += - ( uj - xj ) / ( uj - lj ) * std::exp( lj ) + ( xj - lj ) / ( uj - lj ) * std::exp( uj ); - } - - val -= std::exp( inputMids[i] ) / ( sum * sum ) * ( std::exp( udi ) - std::exp( ldi ) ) / - ( udi - ldi ); - - return val; -} - -double DeepPolySoftmaxElement::LSELowerBound2( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) -{ - double max = FloatUtils::negativeInfinity(); - unsigned maxInputIndex = 0; - unsigned index = 0; - for ( const auto &mid : inputMids ) - { - if ( mid > max ) - { - max = mid; - maxInputIndex = index; - } - ++index; - } - - if ( maxInputIndex == i ) - return ERLowerBound( inputMids, inputLbs, inputUbs, i ); - else - { - double sum = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) - { - if ( j == maxInputIndex ) - sum += 1; - else - { - double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; - double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; - double xjjstar = inputMids[j] - inputMids[maxInputIndex]; - - sum += ( ujjstar - xjjstar ) / ( ujjstar - ljjstar ) * std::exp( ljjstar ) + - ( xjjstar - ljjstar ) / ( ujjstar - ljjstar ) * std::exp( ujjstar ); - } - } - - return std::exp( inputMids[i] - inputMids[maxInputIndex] ) / sum; - } -} - -double DeepPolySoftmaxElement::dLSELowerBound2( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) -{ - double max = FloatUtils::negativeInfinity(); - unsigned maxInputIndex = 0; - unsigned index = 0; - for ( const auto &mid : inputMids ) - { - if ( mid > max ) - { - max = mid; - maxInputIndex = index; - } - ++index; - } - - if ( maxInputIndex == i ) - return dERLowerBound( inputMids, inputLbs, inputUbs, i, di ); - else - { - double val = LSELowerBound2( inputMids, inputLbs, inputUbs, i ); - - double sum = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) - { - if ( j == maxInputIndex ) - sum += 1; - else - { - double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; - double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; - double xjjstar = inputMids[j] - inputMids[maxInputIndex]; - sum += ( ujjstar - xjjstar ) / ( ujjstar - ljjstar ) * std::exp( ljjstar ) + - ( xjjstar - ljjstar ) / ( ujjstar - ljjstar ) * std::exp( ujjstar ); - } - } - double val2 = std::exp( inputMids[i] - inputMids[maxInputIndex] ) / ( sum * sum ); - - if ( i == di ) - { - double ldijstar = inputLbs[i] - inputUbs[maxInputIndex]; - double udijstar = 
inputUbs[i] - inputLbs[maxInputIndex]; - return val - - val2 * ( std::exp( udijstar ) - std::exp( ldijstar ) ) / ( udijstar - ldijstar ); - } - else if ( maxInputIndex == di ) - { - double sum2 = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) - { - if ( j == maxInputIndex ) - continue; - else - { - double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; - double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; - sum2 += ( std::exp( ujjstar ) - std::exp( ljjstar ) ) / ( ujjstar - ljjstar ); - } - } - return -val + val2 * sum2; - } - else - { - double ldijstar = inputLbs[di] - inputUbs[maxInputIndex]; - double udijstar = inputUbs[di] - inputLbs[maxInputIndex]; - return -val2 * ( std::exp( udijstar ) - std::exp( ldijstar ) ) / - ( udijstar - ldijstar ); - } - } -} - -double DeepPolySoftmaxElement::LSEUpperBound( const Vector &inputs, - const Vector &outputLb, - const Vector &outputUb, - unsigned i ) -{ - double li = outputLb[i]; - double ui = outputUb[i]; - - Vector inputTilda; - SoftmaxConstraint::xTilda( inputs, inputs[i], inputTilda ); - - return ( ( li * std::log( ui ) - ui * std::log( li ) ) / ( std::log( ui ) - std::log( li ) ) - - ( ui - li ) / ( std::log( ui ) - std::log( li ) ) * - SoftmaxConstraint::logSumOfExponential( inputTilda ) ); -} - -double DeepPolySoftmaxElement::dLSEUpperbound( const Vector &inputMids, - const Vector &outputLb, - const Vector &outputUb, - unsigned i, - unsigned di ) -{ - double li = outputLb[i]; - double ui = outputUb[i]; - - double val = -( ui - li ) / ( std::log( ui ) - std::log( li ) ); - - double val2 = std::exp( inputMids[di] ) / SoftmaxConstraint::sumOfExponential( inputMids ); - if ( i == di ) - val2 -= 1; - - return val * val2; -} - -double DeepPolySoftmaxElement::ERLowerBound( const Vector &inputs, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) -{ - Vector inputTilda; - SoftmaxConstraint::xTilda( inputs, inputs[i], inputTilda ); - - double sum = 0; - for ( unsigned j = 0; j < inputs.size(); ++j ) - { - if ( i == j ) - sum += 1; - else - { - double ljTilda = inputLbs[j] - inputUbs[i]; - double ujTilda = inputUbs[j] - inputLbs[i]; - double xjTilda = inputTilda[j]; - - sum += ( ujTilda - xjTilda ) / ( ujTilda - ljTilda ) * std::exp( ljTilda ) + - ( xjTilda - ljTilda ) / ( ujTilda - ljTilda ) * std::exp( ujTilda ); - } - } - - return 1 / sum; -} - -double DeepPolySoftmaxElement::dERLowerBound( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) -{ - double val = ERLowerBound( inputMids, inputLbs, inputUbs, i ); - - if ( i != di ) - { - double ldiTilda = inputLbs[di] - inputUbs[i]; - double udiTilda = inputUbs[di] - inputLbs[i]; - return -val * val * ( std::exp( udiTilda ) - std::exp( ldiTilda ) ) / - ( udiTilda - ldiTilda ); - } - else - { - double val2 = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) - { - if ( j != i ) - { - double ljTilda = inputLbs[j] - inputUbs[i]; - double ujTilda = inputUbs[j] - inputLbs[i]; - val2 += ( std::exp( ujTilda ) - std::exp( ljTilda ) ) / ( ujTilda - ljTilda ); - } - } - return val * val * val2; - } -} - -double DeepPolySoftmaxElement::ERUpperBound( const Vector &inputs, - const Vector &outputLb, - const Vector &outputUb, - unsigned i ) -{ - double li = outputLb[i]; - double ui = outputUb[i]; - - Vector inputTilda; - SoftmaxConstraint::xTilda( inputs, inputs[i], inputTilda ); - - return ui + li - ui * li * SoftmaxConstraint::sumOfExponential( inputTilda ); -} - -double DeepPolySoftmaxElement::dERUpperBound( const Vector 
&inputMids, - const Vector &outputLb, - const Vector &outputUb, - unsigned i, - unsigned di ) -{ - double li = outputLb[i]; - double ui = outputUb[i]; - - - if ( i == di ) - { - double val2 = -1; - for ( unsigned j = 0; j < inputMids.size(); ++j ) - val2 += std::exp( inputMids[j] - inputMids[i] ); - return li * ui * val2; - } - else - return -li * ui * std::exp( inputMids[di] - inputMids[i] ); -} - -double DeepPolySoftmaxElement::linearLowerBound( const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) -{ - Vector uTilda; - SoftmaxConstraint::xTilda( inputUbs, inputLbs[i], uTilda ); - uTilda[i] = 0; - return 1 / SoftmaxConstraint::sumOfExponential( uTilda ); -} - -double DeepPolySoftmaxElement::linearUpperBound( const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) -{ - Vector lTilda; - SoftmaxConstraint::xTilda( inputLbs, inputUbs[i], lTilda ); - lTilda[i] = 0; - return 1 / SoftmaxConstraint::sumOfExponential( lTilda ); -} - void DeepPolySoftmaxElement::log( const String &message ) { if ( GlobalConfiguration::NETWORK_LEVEL_REASONER_LOGGING ) diff --git a/src/nlr/DeepPolySoftmaxElement.h b/src/nlr/DeepPolySoftmaxElement.h index a7b2220220..82e74dae56 100644 --- a/src/nlr/DeepPolySoftmaxElement.h +++ b/src/nlr/DeepPolySoftmaxElement.h @@ -42,61 +42,6 @@ class DeepPolySoftmaxElement : public DeepPolyElement unsigned targetLayerSize, DeepPolyElement *predecessor ); - // The following methods compute concrete softmax output bounds - // using different linear approximation, as well as the coefficients - // of softmax inputs in the symbolic bounds - static double LSELowerBound( const Vector &sourceMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned outputIndex ); - static double dLSELowerBound( const Vector &sourceMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned outputIndex, - unsigned inputIndex ); - static double LSELowerBound2( const Vector &sourceMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned outputIndex ); - static double dLSELowerBound2( const Vector &sourceMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned outputIndex, - unsigned inputIndex ); - static double LSEUpperBound( const Vector &sourceMids, - const Vector &outputLb, - const Vector &outputUb, - unsigned outputIndex ); - static double dLSEUpperbound( const Vector &sourceMids, - const Vector &outputLb, - const Vector &outputUb, - unsigned outputIndex, - unsigned inputIndex ); - static double ERLowerBound( const Vector &sourceMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned outputIndex ); - static double dERLowerBound( const Vector &sourceMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned outputIndex, - unsigned inputIndex ); - static double ERUpperBound( const Vector &sourceMids, - const Vector &outputLbs, - const Vector &outputUbs, - unsigned outputIndex ); - static double dERUpperBound( const Vector &sourceMids, - const Vector &outputLbs, - const Vector &outputUbs, - unsigned outputIndex, - unsigned inputIndex ); - static double linearLowerBound( const Vector &outputLbs, - const Vector &outputUbs, - unsigned outputIndex ); - static double linearUpperBound( const Vector &outputLbs, - const Vector &outputUbs, - unsigned outputIndex ); - private: SoftmaxBoundType _boundType; unsigned _maxLayerSize; diff --git a/src/nlr/DeepPolyWeightedSumElement.cpp b/src/nlr/DeepPolyWeightedSumElement.cpp index 31a5c297b0..3f7276a433 100644 --- a/src/nlr/DeepPolyWeightedSumElement.cpp +++ 
b/src/nlr/DeepPolyWeightedSumElement.cpp @@ -102,6 +102,20 @@ void DeepPolyWeightedSumElement::computeBoundWithBackSubstitution( _workSymbolicUpperBias, currentElement, deepPolyElementsBefore ); + + if ( _storeSymbolicBounds ) + { + precedingElement->storeWorkSymbolicBounds( sourceLayerSize, + _work1SymbolicLb, + _work1SymbolicUb, + _workSymbolicLowerBias, + _workSymbolicUpperBias, + _residualLb, + _residualUb, + _residualLayerIndices, + deepPolyElementsBefore ); + } + log( Stringf( "Computing symbolic bounds with respect to layer %u - done", predecessorIndex ) ); while ( currentElement->hasPredecessor() || !_residualLayerIndices.empty() ) @@ -229,6 +243,19 @@ void DeepPolyWeightedSumElement::computeBoundWithBackSubstitution( std::fill_n( _residualLb[newCurrentIndex], currentMatrixSize, 0 ); std::fill_n( _residualUb[newCurrentIndex], currentMatrixSize, 0 ); } + + if ( _storeSymbolicBounds ) + { + precedingElement->storeWorkSymbolicBounds( sourceLayerSize, + _work1SymbolicLb, + _work1SymbolicUb, + _workSymbolicLowerBias, + _workSymbolicUpperBias, + _residualLb, + _residualUb, + _residualLayerIndices, + deepPolyElementsBefore ); + } } ASSERT( _residualLayerIndices.empty() ); log( "Computing bounds with back substitution - done" ); diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp index 4363ecba2f..f7964de418 100644 --- a/src/nlr/LPFormulator.cpp +++ b/src/nlr/LPFormulator.cpp @@ -1,5 +1,5 @@ /********************* */ -/*! \file NetworkLevelReasoner.cpp +/*! \file LPFormulator.cpp ** \verbatim ** Top contributors (to current version): ** Guy Katz @@ -251,7 +251,7 @@ void LPFormulator::optimizeBoundsWithIncrementalLpRelaxation( const Map &layers, bool backward, - const Vector &coeffs, + const Map> &layerIndicesToParameters, const Vector &polygonal_tightenings ) { unsigned numberOfWorkers = Options::get()->getInt( Options::NUM_WORKERS ); @@ -306,7 +306,7 @@ void LPFormulator::optimizeBoundsWithLpRelaxation( // optimize every neuron of layer optimizeBoundsOfNeuronsWithLpRelaxation( - argument, backward, coeffs, polygonal_tightenings ); + argument, backward, layerIndicesToParameters, polygonal_tightenings ); LPFormulator_LOG( Stringf( "Tightening bound for layer %u - done", layerIndex ).ascii() ); } @@ -340,22 +340,6 @@ void LPFormulator::optimizeBoundsWithLpRelaxation( throw InfeasibleQueryException(); } -void LPFormulator::optimizeBoundsWithPreimageApproximation( Map &layers ) -{ - const Vector &optimal_coeffs = layers[0]->OptimalParameterisedSymbolicBoundTightening(); - optimizeBoundsWithLpRelaxation( layers, false, optimal_coeffs ); - optimizeBoundsWithLpRelaxation( layers, true, optimal_coeffs ); -} - -void LPFormulator::optimizeBoundsWithPMNR( Map &layers ) -{ - const Vector &polygonal_tightenings = - layers[0]->OptimizeParameterisedPolygonalTightening(); - const Vector &optimal_coeffs = layers[0]->OptimalParameterisedSymbolicBoundTightening(); - optimizeBoundsWithLpRelaxation( layers, false, optimal_coeffs, polygonal_tightenings ); - optimizeBoundsWithLpRelaxation( layers, true, optimal_coeffs, polygonal_tightenings ); -} - void LPFormulator::optimizeBoundsOfOneLayerWithLpRelaxation( const Map &layers, unsigned targetIndex ) { @@ -432,7 +416,7 @@ void LPFormulator::optimizeBoundsOfOneLayerWithLpRelaxation( const Map &coeffs, + const Map> &layerIndicesToParameters, const Vector &polygonal_tightenings ) { unsigned numberOfWorkers = Options::get()->getInt( Options::NUM_WORKERS ); @@ -546,11 +530,17 @@ void LPFormulator::optimizeBoundsOfNeuronsWithLpRelaxation( mtx.lock(); if ( 
backward ) - createLPRelaxationAfter( - layers, *freeSolver, lastIndexOfRelaxation, coeffs, polygonal_tightenings ); + createLPRelaxationAfter( layers, + *freeSolver, + lastIndexOfRelaxation, + layerIndicesToParameters, + polygonal_tightenings ); else - createLPRelaxation( - layers, *freeSolver, lastIndexOfRelaxation, coeffs, polygonal_tightenings ); + createLPRelaxation( layers, + *freeSolver, + lastIndexOfRelaxation, + layerIndicesToParameters, + polygonal_tightenings ); mtx.unlock(); // spawn a thread to tighten the bounds for the current variable @@ -670,11 +660,12 @@ void LPFormulator::tightenSingleVariableBoundsWithLPRelaxation( ThreadArgument & } } -void LPFormulator::createLPRelaxation( const Map &layers, - GurobiWrapper &gurobi, - unsigned lastLayer, - const Vector &coeffs, - const Vector &polygonal_tightenings ) +void LPFormulator::createLPRelaxation( + const Map &layers, + GurobiWrapper &gurobi, + unsigned lastLayer, + const Map> &layerIndicesToParameters, + const Vector &polygonal_tightenings ) { for ( const auto &layer : layers ) { @@ -682,12 +673,10 @@ void LPFormulator::createLPRelaxation( const Map &layers, if ( currentLayerIndex > lastLayer ) continue; - if ( coeffs.empty() ) + if ( layerIndicesToParameters.empty() ) addLayerToModel( gurobi, layer.second, false ); else { - Map> layerIndicesToParameters = - layer.second->getParametersForLayers( layers, coeffs ); const Vector ¤tLayerCoeffs = layerIndicesToParameters[currentLayerIndex]; addLayerToParameterisedModel( gurobi, layer.second, false, currentLayerCoeffs ); } @@ -699,7 +688,7 @@ void LPFormulator::createLPRelaxationAfter( const Map &layers, GurobiWrapper &gurobi, unsigned firstLayer, - const Vector &coeffs, + const Map> &layerIndicesToParameters, const Vector &polygonal_tightenings ) { unsigned depth = GlobalConfiguration::BACKWARD_BOUND_PROPAGATION_DEPTH; @@ -718,12 +707,10 @@ void LPFormulator::createLPRelaxationAfter( continue; else { - if ( coeffs.empty() ) + if ( layerIndicesToParameters.empty() ) addLayerToModel( gurobi, currentLayer, true ); else { - Map> layerIndicesToParameters = - currentLayer->getParametersForLayers( layers, coeffs ); const Vector ¤tLayerCoeffs = layerIndicesToParameters[currentLayerIndex]; addLayerToParameterisedModel( gurobi, currentLayer, true, currentLayerCoeffs ); @@ -1360,11 +1347,9 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, } double ub = - std::min( DeepPolySoftmaxElement::linearUpperBound( sourceLbs, sourceUbs, index ), - layer->getUb( i ) ); + std::min( Layer::linearUpperBound( sourceLbs, sourceUbs, index ), layer->getUb( i ) ); double lb = - std::max( DeepPolySoftmaxElement::linearLowerBound( sourceLbs, sourceUbs, index ), - layer->getLb( i ) ); + std::max( Layer::linearLowerBound( sourceLbs, sourceUbs, index ), layer->getLb( i ) ); targetLbs[index] = lb; targetUbs[index] = ub; @@ -1396,14 +1381,13 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, { terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = DeepPolySoftmaxElement::LSELowerBound( - sourceMids, sourceLbs, sourceUbs, index ); + bias = Layer::LSELowerBound( sourceMids, sourceLbs, sourceUbs, index ); for ( const auto &source : sources ) { const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); unsigned sourceNeuron = source._neuron; unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dldj = DeepPolySoftmaxElement::dLSELowerBound( + double dldj = Layer::dLSELowerBound( sourceMids, 
sourceLbs, sourceUbs, index, inputIndex ); terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); @@ -1416,14 +1400,13 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, { terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = DeepPolySoftmaxElement::LSELowerBound2( - sourceMids, sourceLbs, sourceUbs, index ); + bias = Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, index ); for ( const auto &source : sources ) { const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); unsigned sourceNeuron = source._neuron; unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dldj = DeepPolySoftmaxElement::dLSELowerBound2( + double dldj = Layer::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); @@ -1435,15 +1418,14 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = DeepPolySoftmaxElement::LSEUpperBound( - sourceMids, targetLbs, targetUbs, index ); + bias = Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, index ); inputIndex = 0; for ( const auto &source : sources ) { const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); unsigned sourceNeuron = source._neuron; unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dudj = DeepPolySoftmaxElement::dLSEUpperbound( + double dudj = Layer::dLSEUpperbound( sourceMids, targetLbs, targetUbs, index, inputIndex ); terms.append( GurobiWrapper::Term( -dudj, Stringf( "x%u", sourceVariable ) ) ); bias -= dudj * sourceMids[inputIndex]; @@ -1455,16 +1437,15 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, { terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = - DeepPolySoftmaxElement::ERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); + bias = Layer::ERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); unsigned inputIndex = 0; for ( const auto &source : sources ) { const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); unsigned sourceNeuron = source._neuron; unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dldj = DeepPolySoftmaxElement::dERLowerBound( - sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + double dldj = + Layer::dERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); bias -= dldj * sourceMids[inputIndex]; ++inputIndex; @@ -1473,16 +1454,15 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = - DeepPolySoftmaxElement::ERUpperBound( sourceMids, targetLbs, targetUbs, index ); + bias = Layer::ERUpperBound( sourceMids, targetLbs, targetUbs, index ); inputIndex = 0; for ( const auto &source : sources ) { const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); unsigned sourceNeuron = source._neuron; unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dudj = DeepPolySoftmaxElement::dERUpperBound( - sourceMids, targetLbs, targetUbs, index, inputIndex ); + double dudj = + Layer::dERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex ); terms.append( 
GurobiWrapper::Term( -dudj, Stringf( "x%u", sourceVariable ) ) ); bias -= dudj * sourceMids[inputIndex]; ++inputIndex; @@ -2172,14 +2152,14 @@ void LPFormulator::addPolyognalTighteningsToLpRelaxation( unsigned lastLayer, const Vector &polygonal_tightenings ) { + List terms; for ( const auto &tightening : polygonal_tightenings ) { - bool out_of_bounds = false; Map neuronToCoefficient = tightening._neuronToCoefficient; PolygonalTightening::PolygonalBoundType type = tightening._type; double value = tightening._value; - List terms; + bool out_of_bounds = false; for ( const auto &pair : neuronToCoefficient ) { unsigned currentLayerIndex = pair.first._layer; @@ -2188,12 +2168,12 @@ void LPFormulator::addPolyognalTighteningsToLpRelaxation( out_of_bounds = true; } } - if ( out_of_bounds ) { continue; } + terms.clear(); for ( const auto &pair : neuronToCoefficient ) { unsigned currentLayerIndex = pair.first._layer; diff --git a/src/nlr/LPFormulator.h b/src/nlr/LPFormulator.h index cd67766b15..76e0cd0b10 100644 --- a/src/nlr/LPFormulator.h +++ b/src/nlr/LPFormulator.h @@ -52,12 +52,15 @@ class LPFormulator : public ParallelSolver LP model is adjusted from the previous call, instead of being constructed from scratch */ - void optimizeBoundsWithLpRelaxation( const Map &layers, - bool backward = false, - const Vector &coeffs = Vector( {} ), - const Vector &polygonal_tightenings = - Vector( {} ) ); + void + optimizeBoundsWithLpRelaxation( const Map &layers, + bool backward = false, + const Map> &layerIndicesToParameters = + Map>(), + const Vector &polygonal_tightenings = + Vector( {} ) ); void optimizeBoundsWithPreimageApproximation( Map &layers ); + void optimizeBoundsWithInvprop( Map &layers ); void optimizeBoundsWithPMNR( Map &layers ); void optimizeBoundsOfOneLayerWithLpRelaxation( const Map &layers, unsigned targetIndex ); @@ -79,13 +82,15 @@ class LPFormulator : public ParallelSolver void createLPRelaxation( const Map &layers, GurobiWrapper &gurobi, unsigned lastLayer = UINT_MAX, - const Vector &coeffs = Vector( {} ), + const Map> &layerIndicesToParameters = + Map>(), const Vector &polygonal_tightenings = Vector( {} ) ); void createLPRelaxationAfter( const Map &layers, GurobiWrapper &gurobi, unsigned firstLayer, - const Vector &coeffs = Vector( {} ), + const Map> &layerIndicesToParameters = + Map>(), const Vector &polygonal_tightenings = Vector( {} ) ); double solveLPRelaxation( GurobiWrapper &gurobi, @@ -142,7 +147,8 @@ class LPFormulator : public ParallelSolver void optimizeBoundsOfNeuronsWithLpRelaxation( ThreadArgument &args, bool backward, - const Vector &coeffs = Vector( {} ), + const Map> &layerIndicesToParameters = + Map>(), const Vector &polygonal_tightenings = Vector( {} ) ); diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index 303fcd53dc..a2d227f641 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -79,7 +79,13 @@ void Layer::allocateMemory() Options::get()->getMILPSolverBoundTighteningType() == MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX || Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR ) + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP || + Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM || + Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT || + Options::get()->getMILPSolverBoundTighteningType() == + 
MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS ) { _symbolicLb = new double[_size * _inputLayerSize]; _symbolicUb = new double[_size * _inputLayerSize]; @@ -1227,8 +1233,8 @@ void Layer::computeIntervalArithmeticBoundsForSoftmax() } } - double lb = softmaxLinearLowerBound( sourceLbs, sourceUbs, index ); - double ub = softmaxLinearUpperBound( sourceLbs, sourceUbs, index ); + double lb = linearLowerBound( sourceLbs, sourceUbs, index ); + double ub = linearUpperBound( sourceLbs, sourceUbs, index ); if ( _lb[i] < lb ) { _lb[i] = lb; @@ -2735,8 +2741,8 @@ void Layer::computeSymbolicBoundsForSoftmax() } } - double lb = softmaxLinearLowerBound( sourceLbs, sourceUbs, index ); - double ub = softmaxLinearUpperBound( sourceLbs, sourceUbs, index ); + double lb = linearLowerBound( sourceLbs, sourceUbs, index ); + double ub = linearUpperBound( sourceLbs, sourceUbs, index ); if ( _lb[i] < lb ) { _lb[i] = lb; @@ -2777,11 +2783,11 @@ void Layer::computeSymbolicBoundsForSoftmax() if ( !useLSE2 ) { _symbolicLowerBias[i] = - softmaxLSELowerBound( sourceMids, sourceLbs, sourceUbs, index ); + LSELowerBound( sourceMids, sourceLbs, sourceUbs, index ); for ( const auto &sourceIndex : sources ) { - double dldj = softmaxdLSELowerBound( - sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + double dldj = + dLSELowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); symbolicLb[len * sourceIndex._neuron + i] = dldj; _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; ++inputIndex; @@ -2790,24 +2796,23 @@ void Layer::computeSymbolicBoundsForSoftmax() else { _symbolicLowerBias[i] = - softmaxLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index ); + LSELowerBound2( sourceMids, sourceLbs, sourceUbs, index ); for ( const auto &sourceIndex : sources ) { - double dldj = softmaxdLSELowerBound2( - sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + double dldj = + dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); symbolicLb[len * sourceIndex._neuron + i] = dldj; _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; ++inputIndex; } } - _symbolicUpperBias[i] = - softmaxLSEUpperBound( sourceMids, targetLbs, targetUbs, index ); + _symbolicUpperBias[i] = LSEUpperBound( sourceMids, targetLbs, targetUbs, index ); inputIndex = 0; for ( const auto &sourceIndex : sources ) { - double dudj = softmaxdLSEUpperbound( - sourceMids, targetLbs, targetUbs, index, inputIndex ); + double dudj = + dLSEUpperbound( sourceMids, targetLbs, targetUbs, index, inputIndex ); symbolicUb[len * sourceIndex._neuron + i] = dudj; _symbolicUpperBias[i] -= dudj * sourceMids[inputIndex]; ++inputIndex; @@ -2815,25 +2820,23 @@ void Layer::computeSymbolicBoundsForSoftmax() } else if ( boundType == SoftmaxBoundType::EXPONENTIAL_RECIPROCAL_DECOMPOSITION ) { - _symbolicLowerBias[i] = - softmaxERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); + _symbolicLowerBias[i] = ERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); unsigned inputIndex = 0; for ( const auto &sourceIndex : sources ) { double dldj = - softmaxdERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + dERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); symbolicLb[len * sourceIndex._neuron + i] = dldj; _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; ++inputIndex; } - _symbolicUpperBias[i] = - softmaxERUpperBound( sourceMids, targetLbs, targetUbs, index ); + _symbolicUpperBias[i] = ERUpperBound( sourceMids, targetLbs, targetUbs, index ); inputIndex = 0; for ( const auto &sourceIndex : sources ) { 
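// Note the asymmetry in the exponential-reciprocal decomposition: the lower
// bound is parameterised by the input box (sourceLbs/sourceUbs), while the
// upper bound uses the softmax *output* interval [li, ui]
// (targetLbs/targetUbs), as in ERUpperBound:
//     ui + li - ui * li * sum_j exp( x_j - x_i ).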
double dudj = - softmaxdERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex ); + dERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex ); symbolicUb[len * sourceIndex._neuron + i] = dudj; _symbolicUpperBias[i] -= dudj * sourceMids[inputIndex]; ++inputIndex; @@ -4297,179 +4300,6 @@ void Layer::computeParameterisedSymbolicBoundsForBilinear( const Vector } } -const Vector Layer::OptimalParameterisedSymbolicBoundTightening() -{ - const Map &layers = _layerOwner->getLayerIndexToLayer(); - - // Search over coeffs in [0, 1]^number_of_parameters with projected grdient descent. - unsigned max_iterations = - GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS; - double step_size = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; - double epsilon = GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS; - double weight_decay = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_WEIGHT_DECAY; - double lr = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE; - unsigned dimension = getNumberOfParameters( layers ); - bool maximize = false; - - Vector lower_bounds( dimension ); - Vector upper_bounds( dimension ); - for ( size_t j = 0; j < dimension; ++j ) - { - lower_bounds[j] = 0; - upper_bounds[j] = 1; - } - - // Initialize initial guess uniformly. - Vector guess( dimension ); - std::mt19937_64 rng( GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED ); - std::uniform_real_distribution dis( 0, 1 ); - for ( size_t j = 0; j < dimension; ++j ) - { - double lb = lower_bounds[j]; - double ub = upper_bounds[j]; - guess[j] = lb + dis( rng ) * ( ub - lb ); - } - - Vector> candidates( dimension ); - Vector gradient( dimension ); - - for ( size_t i = 0; i < max_iterations; ++i ) - { - double current_cost = EstimateVolume( guess ); - for ( size_t j = 0; j < dimension; ++j ) - { - candidates[j] = Vector( guess ); - candidates[j][j] += step_size; - - if ( candidates[j][j] > upper_bounds[j] || candidates[j][j] < lower_bounds[j] ) - { - gradient[j] = 0; - continue; - } - - size_t sign = ( maximize == false ? 1 : -1 ); - double cost = EstimateVolume( candidates[j] ); - gradient[j] = sign * ( cost - current_cost ) / step_size + weight_decay * guess[j]; - } - - bool gradient_is_zero = true; - for ( size_t j = 0; j < dimension; ++j ) - { - if ( FloatUtils::abs( gradient[j] ) > epsilon ) - { - gradient_is_zero = false; - } - } - if ( gradient_is_zero ) - { - break; - } - - for ( size_t j = 0; j < dimension; ++j ) - { - guess[j] -= lr * gradient[j]; - - if ( guess[j] > upper_bounds[j] ) - { - guess[j] = upper_bounds[j]; - } - - if ( guess[j] < lower_bounds[j] ) - { - guess[j] = lower_bounds[j]; - } - } - } - - const Vector optimal_coeffs( guess ); - return optimal_coeffs; -} - - -const Vector Layer::OptimizeParameterisedPolygonalTightening() -{ - // ... -} - -const Vector -Layer::OptimizeSingleParameterisedPolygonalTightening( PolygonalTightening tightening, - Vector prevTightenings ) -{ - // ... -} - -double Layer::EstimateVolume( const Vector &coeffs ) -{ - // First, run parameterised symbolic bound propagation. 
- const Map &layers = _layerOwner->getLayerIndexToLayer(); - Map> layerIndicesToParameters = - getParametersForLayers( layers, coeffs ); - for ( unsigned i = 0; i < layers.size(); ++i ) - { - ASSERT( layers.exists( i ) ); - const Vector ¤tLayerCoeffs = layerIndicesToParameters[i]; - layers[i]->computeParameterisedSymbolicBounds( currentLayerCoeffs ); - } - - std::mt19937_64 rng( GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED ); - double log_box_volume = 0; - double sigmoid_sum = 0; - - unsigned inputLayerIndex = 0; - unsigned outputLayerIndex = layers.size() - 1; - Layer *inputLayer = layers[inputLayerIndex]; - Layer *outputLayer = layers[outputLayerIndex]; - - // Calculate volume of input variables' bounding box. - for ( unsigned index = 0; index < inputLayer->getSize(); ++index ) - { - if ( inputLayer->neuronEliminated( index ) ) - continue; - - double lb = inputLayer->getLb( index ); - double ub = inputLayer->getUb( index ); - - if ( lb == ub ) - continue; - - log_box_volume += std::log( ub - lb ); - } - - for ( unsigned i = 0; i < GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; ++i ) - { - // Sample input point from known bounds. - Map point; - for ( unsigned j = 0; j < inputLayer->getSize(); ++j ) - { - if ( inputLayer->neuronEliminated( j ) ) - { - point.insert( j, 0 ); - } - else - { - double lb = inputLayer->getLb( j ); - double ub = inputLayer->getUb( j ); - std::uniform_real_distribution<> dis( lb, ub ); - point.insert( j, dis( rng ) ); - } - } - - // Calculate sigmoid of maximum margin from output symbolic bounds. - double max_margin = 0; - for ( unsigned j = 0; j < outputLayer->getSize(); ++j ) - { - if ( outputLayer->neuronEliminated( j ) ) - continue; - - double margin = outputLayer->calculateDifferenceFromSymbolic( point, j ); - max_margin = std::max( max_margin, margin ); - } - sigmoid_sum += SigmoidConstraint::sigmoid( max_margin ); - } - - return std::exp( log_box_volume + std::log( sigmoid_sum ) ) / - GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; -} double Layer::calculateDifferenceFromSymbolic( Map &point, unsigned i ) const { @@ -4485,88 +4315,10 @@ double Layer::calculateDifferenceFromSymbolic( Map &point, uns return std::max( _ub[i] - upperSum, lowerSum - _lb[i] ); } -double Layer::getParameterisdPolygonalTighteningLowerBound( - const Vector &coeffs, - const Vector &gamma, - PolygonalTightening tightening, - Vector prevTightenings ) const -{ - // ... -} - - -const List Layer::selectConstraints( const Map &layers ) const -{ - // ... -} - -const Vector -Layer::generatePolygonalTightenings( const Map &layers ) const -{ - // ... 
-} - - -unsigned Layer::getNumberOfParametersPerType( Type t ) const -{ - if ( t == Layer::RELU || t == Layer::LEAKY_RELU ) - return 1; - - if ( t == Layer::SIGN || t == Layer::BILINEAR ) - return 2; - - return 0; -} - -unsigned Layer::getNumberOfParameters( const Map &layers ) const -{ - unsigned num = 0; - for ( auto pair : layers ) - { - unsigned layerIndex = pair.first; - Layer *layer = layers[layerIndex]; - num += getNumberOfParametersPerType( layer->getLayerType() ); - } - return num; -} - -Map> Layer::getParametersForLayers( const Map &layers, - const Vector &coeffs ) const -{ - unsigned index = 0; - Map> layerIndicesToParameters; - for ( auto pair : layers ) - { - unsigned layerIndex = pair.first; - Layer *layer = layers[layerIndex]; - unsigned n_coeffs = getNumberOfParametersPerType( layer->getLayerType() ); - Vector current_coeffs( n_coeffs ); - for ( unsigned i = 0; i < n_coeffs; i++ ) - { - current_coeffs[i] = coeffs[index + i]; - } - layerIndicesToParameters.insert( layerIndex, current_coeffs ); - index += n_coeffs; - } - return layerIndicesToParameters; -} - -unsigned Layer::countNonlinearLayers( const Map &layers ) const -{ - unsigned num = 0; - for ( auto pair : layers ) - { - unsigned layerIndex = pair.first; - Layer *layer = layers[layerIndex]; - num += layer->getLayerType() != Layer::WEIGHTED_SUM ? 1 : 0; - } - return num; -} - -double Layer::softmaxLSELowerBound( const Vector &inputs, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) +double Layer::LSELowerBound( const Vector &inputs, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) { double sum = 0; for ( unsigned j = 0; j < inputs.size(); ++j ) @@ -4581,15 +4333,15 @@ double Layer::softmaxLSELowerBound( const Vector &inputs, return std::exp( inputs[i] ) / sum; } -double Layer::softmaxdLSELowerBound( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) +double Layer::dLSELowerBound( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) { double val = 0; if ( i == di ) - val += softmaxLSELowerBound( inputMids, inputLbs, inputUbs, i ); + val += LSELowerBound( inputMids, inputLbs, inputUbs, i ); double ldi = inputLbs[di]; double udi = inputUbs[di]; @@ -4611,10 +4363,10 @@ double Layer::softmaxdLSELowerBound( const Vector &inputMids, return val; } -double Layer::softmaxLSELowerBound2( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) +double Layer::LSELowerBound2( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) { double max = FloatUtils::negativeInfinity(); unsigned maxInputIndex = 0; @@ -4630,7 +4382,7 @@ double Layer::softmaxLSELowerBound2( const Vector &inputMids, } if ( maxInputIndex == i ) - return softmaxERLowerBound( inputMids, inputLbs, inputUbs, i ); + return ERLowerBound( inputMids, inputLbs, inputUbs, i ); else { double sum = 0; @@ -4653,11 +4405,11 @@ double Layer::softmaxLSELowerBound2( const Vector &inputMids, } } -double Layer::softmaxdLSELowerBound2( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) +double Layer::dLSELowerBound2( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) { double max = FloatUtils::negativeInfinity(); unsigned maxInputIndex = 0; @@ -4673,10 +4425,10 @@ double Layer::softmaxdLSELowerBound2( const Vector &inputMids, } if ( maxInputIndex == i ) - 
return softmaxdERLowerBound( inputMids, inputLbs, inputUbs, i, di ); + return dERLowerBound( inputMids, inputLbs, inputUbs, i, di ); else { - double val = softmaxLSELowerBound2( inputMids, inputLbs, inputUbs, i ); + double val = LSELowerBound2( inputMids, inputLbs, inputUbs, i ); double sum = 0; for ( unsigned j = 0; j < inputMids.size(); ++j ) @@ -4727,10 +4479,10 @@ double Layer::softmaxdLSELowerBound2( const Vector &inputMids, } } -double Layer::softmaxLSEUpperBound( const Vector &inputs, - const Vector &outputLb, - const Vector &outputUb, - unsigned i ) +double Layer::LSEUpperBound( const Vector &inputs, + const Vector &outputLb, + const Vector &outputUb, + unsigned i ) { double li = outputLb[i]; double ui = outputUb[i]; @@ -4743,11 +4495,11 @@ double Layer::softmaxLSEUpperBound( const Vector &inputs, SoftmaxConstraint::logSumOfExponential( inputTilda ) ); } -double Layer::softmaxdLSEUpperbound( const Vector &inputMids, - const Vector &outputLb, - const Vector &outputUb, - unsigned i, - unsigned di ) +double Layer::dLSEUpperbound( const Vector &inputMids, + const Vector &outputLb, + const Vector &outputUb, + unsigned i, + unsigned di ) { double li = outputLb[i]; double ui = outputUb[i]; @@ -4761,10 +4513,10 @@ double Layer::softmaxdLSEUpperbound( const Vector &inputMids, return val * val2; } -double Layer::softmaxERLowerBound( const Vector &inputs, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) +double Layer::ERLowerBound( const Vector &inputs, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) { Vector inputTilda; SoftmaxConstraint::xTilda( inputs, inputs[i], inputTilda ); @@ -4788,13 +4540,13 @@ double Layer::softmaxERLowerBound( const Vector &inputs, return 1 / sum; } -double Layer::softmaxdERLowerBound( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) +double Layer::dERLowerBound( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) { - double val = softmaxERLowerBound( inputMids, inputLbs, inputUbs, i ); + double val = ERLowerBound( inputMids, inputLbs, inputUbs, i ); if ( i != di ) { @@ -4819,10 +4571,10 @@ double Layer::softmaxdERLowerBound( const Vector &inputMids, } } -double Layer::softmaxERUpperBound( const Vector &inputs, - const Vector &outputLb, - const Vector &outputUb, - unsigned i ) +double Layer::ERUpperBound( const Vector &inputs, + const Vector &outputLb, + const Vector &outputUb, + unsigned i ) { double li = outputLb[i]; double ui = outputUb[i]; @@ -4833,15 +4585,16 @@ double Layer::softmaxERUpperBound( const Vector &inputs, return ui + li - ui * li * SoftmaxConstraint::sumOfExponential( inputTilda ); } -double Layer::softmaxdERUpperBound( const Vector &inputMids, - const Vector &outputLb, - const Vector &outputUb, - unsigned i, - unsigned di ) +double Layer::dERUpperBound( const Vector &inputMids, + const Vector &outputLb, + const Vector &outputUb, + unsigned i, + unsigned di ) { double li = outputLb[i]; double ui = outputUb[i]; + if ( i == di ) { double val2 = -1; @@ -4853,9 +4606,9 @@ double Layer::softmaxdERUpperBound( const Vector &inputMids, return -li * ui * std::exp( inputMids[di] - inputMids[i] ); } -double Layer::softmaxLinearLowerBound( const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) +double Layer::linearLowerBound( const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) { Vector uTilda; SoftmaxConstraint::xTilda( inputUbs, inputLbs[i], uTilda ); @@ -4863,9 +4616,9 @@ double 
Layer::softmaxLinearLowerBound( const Vector &inputLbs, return 1 / SoftmaxConstraint::sumOfExponential( uTilda ); } -double Layer::softmaxLinearUpperBound( const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) +double Layer::linearUpperBound( const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) { Vector lTilda; SoftmaxConstraint::xTilda( inputLbs, inputUbs[i], lTilda ); diff --git a/src/nlr/Layer.h b/src/nlr/Layer.h index 8a0fe325c4..02961c568f 100644 --- a/src/nlr/Layer.h +++ b/src/nlr/Layer.h @@ -140,35 +140,64 @@ class Layer void computeSymbolicBounds(); void computeParameterisedSymbolicBounds( const Vector &coeffs, bool receive = false ); + // Return difference between given point and upper and lower bounds determined by parameterised + // SBT relaxation. + double calculateDifferenceFromSymbolic( Map &point, unsigned i ) const; - // Get number of optimizable parameters for parameterised SBT relaxation per layer type. - unsigned getNumberOfParametersPerType( Type t ) const; - - // Get total number of optimizable parameters for parameterised SBT relaxation. - unsigned getNumberOfParameters( const Map &layers ) const; - - // Get total number of non-weighted sum layers for INVPROP. - unsigned countNonlinearLayers( const Map &layers ) const; - - // Get map containing vector of optimizable parameters for parameterised SBT relaxation for - // every layer index. - Map> getParametersForLayers( const Map &layers, - const Vector &coeffs ) const; - - // Return optimizable parameters which minimize parameterised SBT bounds' volume. - const Vector OptimalParameterisedSymbolicBoundTightening(); - - // Heuristically select neurons and polygonal tightenings for PMNR. - const List selectConstraints( const Map &layers ) const; - - const Vector - generatePolygonalTightenings( const Map &layers ) const; - - // Optimize biases of generated parameterised polygonal tightenings. 
- const Vector OptimizeParameterisedPolygonalTightening(); - const Vector - OptimizeSingleParameterisedPolygonalTightening( PolygonalTightening tightening, - Vector prevTightenings ); + // The following methods compute concrete softmax output bounds + // using different linear approximation, as well as the coefficients + // of softmax inputs in the symbolic bounds + static double LSELowerBound( const Vector &sourceMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned outputIndex ); + static double dLSELowerBound( const Vector &sourceMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned outputIndex, + unsigned inputIndex ); + static double LSELowerBound2( const Vector &sourceMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned outputIndex ); + static double dLSELowerBound2( const Vector &sourceMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned outputIndex, + unsigned inputIndex ); + static double LSEUpperBound( const Vector &sourceMids, + const Vector &outputLb, + const Vector &outputUb, + unsigned outputIndex ); + static double dLSEUpperbound( const Vector &sourceMids, + const Vector &outputLb, + const Vector &outputUb, + unsigned outputIndex, + unsigned inputIndex ); + static double ERLowerBound( const Vector &sourceMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned outputIndex ); + static double dERLowerBound( const Vector &sourceMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned outputIndex, + unsigned inputIndex ); + static double ERUpperBound( const Vector &sourceMids, + const Vector &outputLbs, + const Vector &outputUbs, + unsigned outputIndex ); + static double dERUpperBound( const Vector &sourceMids, + const Vector &outputLbs, + const Vector &outputUbs, + unsigned outputIndex, + unsigned inputIndex ); + static double linearLowerBound( const Vector &outputLbs, + const Vector &outputUbs, + unsigned outputIndex ); + static double linearUpperBound( const Vector &outputLbs, + const Vector &outputUbs, + unsigned outputIndex ); /* Preprocessing functionality: variable elimination and reindexing @@ -239,74 +268,6 @@ class Layer void allocateMemory(); void freeMemoryIfNeeded(); - /* - The following methods compute concrete softmax output bounds - using different linear approximation, as well as the coefficients - of softmax inputs in the symbolic bounds - */ - double softmaxLSELowerBound( const Vector &inputs, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ); - - double softmaxdLSELowerBound( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ); - - double softmaxLSELowerBound2( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ); - - double softmaxdLSELowerBound2( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ); - - double softmaxLSEUpperBound( const Vector &inputs, - const Vector &outputLb, - const Vector &outputUb, - unsigned i ); - - double softmaxdLSEUpperbound( const Vector &inputMids, - const Vector &outputLb, - const Vector &outputUb, - unsigned i, - unsigned di ); - - double softmaxERLowerBound( const Vector &inputs, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ); - - double softmaxdERLowerBound( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ); - - double softmaxERUpperBound( const Vector &inputs, - const Vector &outputLb, - const Vector 
&outputUb, - unsigned i ); - - double softmaxdERUpperBound( const Vector &inputMids, - const Vector &outputLb, - const Vector &outputUb, - unsigned i, - unsigned di ); - - double softmaxLinearLowerBound( const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ); - - double softmaxLinearUpperBound( const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ); - /* Helper functions for symbolic bound tightening */ @@ -323,7 +284,6 @@ class Layer void computeSymbolicBoundsForBilinear(); void computeSymbolicBoundsDefault(); - /* Helper functions for parameterised symbolic bound tightening */ @@ -334,20 +294,6 @@ class Layer void computeParameterisedSymbolicBoundsForBilinear( const Vector &coeffs, bool receive ); - // Estimate Volume of parameterised symbolic bound tightening. - double EstimateVolume( const Vector &coeffs ); - - // Return difference between given point and upper and lower bounds determined by parameterised - // SBT relaxation. - double calculateDifferenceFromSymbolic( Map &point, unsigned i ) const; - - // Get current lower bound for selected parameterised polygonal tightenings' biases. - double getParameterisdPolygonalTighteningLowerBound( - const Vector &coeffs, - const Vector &gamma, - PolygonalTightening tightening, - Vector prevTightenings ) const; - /* Helper functions for interval bound tightening */ diff --git a/src/nlr/NetworkLevelReasoner.cpp b/src/nlr/NetworkLevelReasoner.cpp index 94b8c57833..5976941a25 100644 --- a/src/nlr/NetworkLevelReasoner.cpp +++ b/src/nlr/NetworkLevelReasoner.cpp @@ -20,7 +20,6 @@ #include "FloatUtils.h" #include "InfeasibleQueryException.h" #include "IterativePropagator.h" -#include "LPFormulator.h" #include "MILPFormulator.h" #include "MStringf.h" #include "MarabouError.h" @@ -75,6 +74,11 @@ void NetworkLevelReasoner::addLayerDependency( unsigned sourceLayer, unsigned ta _layerIndexToLayer[sourceLayer]->getSize() ); } +void NetworkLevelReasoner::removeLayerDependency( unsigned sourceLayer, unsigned targetLayer ) +{ + _layerIndexToLayer[targetLayer]->removeSourceLayer( sourceLayer ); +} + void NetworkLevelReasoner::computeSuccessorLayers() { for ( unsigned i = 0; i < _layerIndexToLayer.size(); ++i ) @@ -224,7 +228,7 @@ void NetworkLevelReasoner::symbolicBoundPropagation() void NetworkLevelReasoner::parameterisedSymbolicBoundPropagation( const Vector &coeffs ) { Map> layerIndicesToParameters = - _layerIndexToLayer[0]->getParametersForLayers( _layerIndexToLayer, coeffs ); + getParametersForLayers( _layerIndexToLayer, coeffs ); for ( unsigned i = 0; i < _layerIndexToLayer.size(); ++i ) { const Vector ¤tLayerCoeffs = layerIndicesToParameters[i]; @@ -251,10 +255,17 @@ void NetworkLevelReasoner::lpRelaxationPropagation() lpFormulator.optimizeBoundsWithLpRelaxation( _layerIndexToLayer, true ); else if ( Options::get()->getMILPSolverBoundTighteningType() == MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX ) - lpFormulator.optimizeBoundsWithPreimageApproximation( _layerIndexToLayer ); + optimizeBoundsWithPreimageApproximation( _layerIndexToLayer, lpFormulator ); + else if ( Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP ) + optimizeBoundsWithInvprop( _layerIndexToLayer, lpFormulator ); else if ( Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR ) - lpFormulator.optimizeBoundsWithPMNR( _layerIndexToLayer ); + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM || + 
Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT || + Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS ) + optimizeBoundsWithPMNR( _layerIndexToLayer, lpFormulator ); else if ( Options::get()->getMILPSolverBoundTighteningType() == MILPSolverBoundTighteningType::LP_RELAXATION ) lpFormulator.optimizeBoundsWithLpRelaxation( _layerIndexToLayer ); @@ -263,6 +274,44 @@ void NetworkLevelReasoner::lpRelaxationPropagation() lpFormulator.optimizeBoundsWithIncrementalLpRelaxation( _layerIndexToLayer ); } +void NetworkLevelReasoner::optimizeBoundsWithPreimageApproximation( Map &layers, + LPFormulator &lpFormulator ) +{ + const Vector &optimal_coeffs = OptimalParameterisedSymbolicBoundTightening(); + Map> layerIndicesToParameters = + getParametersForLayers( layers, optimal_coeffs ); + lpFormulator.optimizeBoundsWithLpRelaxation( layers, false, layerIndicesToParameters ); + lpFormulator.optimizeBoundsWithLpRelaxation( layers, true, layerIndicesToParameters ); +} + +void NetworkLevelReasoner::optimizeBoundsWithInvprop( Map &layers, + LPFormulator &lpFormulator ) +{ + const Vector &polygonal_tightenings = + OptimizeParameterisedPolygonalTightening(); + const Vector &optimal_coeffs = Vector( {} ); + Map> layerIndicesToParameters = + getParametersForLayers( layers, optimal_coeffs ); + lpFormulator.optimizeBoundsWithLpRelaxation( + layers, false, layerIndicesToParameters, polygonal_tightenings ); + lpFormulator.optimizeBoundsWithLpRelaxation( + layers, true, layerIndicesToParameters, polygonal_tightenings ); +} + +void NetworkLevelReasoner::optimizeBoundsWithPMNR( Map &layers, + LPFormulator &lpFormulator ) +{ + const Vector &polygonal_tightenings = + OptimizeParameterisedPolygonalTightening(); + const Vector &optimal_coeffs = OptimalParameterisedSymbolicBoundTightening(); + Map> layerIndicesToParameters = + getParametersForLayers( layers, optimal_coeffs ); + lpFormulator.optimizeBoundsWithLpRelaxation( + layers, false, layerIndicesToParameters, polygonal_tightenings ); + lpFormulator.optimizeBoundsWithLpRelaxation( + layers, true, layerIndicesToParameters, polygonal_tightenings ); +} + void NetworkLevelReasoner::LPTighteningForOneLayer( unsigned targetIndex ) { LPFormulator lpFormulator( this ); @@ -318,6 +367,46 @@ void NetworkLevelReasoner::freeMemoryIfNeeded() for ( const auto &layer : _layerIndexToLayer ) delete layer.second; _layerIndexToLayer.clear(); + + for ( auto &pair : _outputLayerSymbolicLb ) + { + if ( pair.second ) + { + delete[] pair.second; + pair.second = NULL; + } + } + _outputLayerSymbolicLb.clear(); + + for ( auto &pair : _outputLayerSymbolicUb ) + { + if ( pair.second ) + { + delete[] pair.second; + pair.second = NULL; + } + } + _outputLayerSymbolicUb.clear(); + + for ( auto &pair : _outputLayerSymbolicLowerBias ) + { + if ( pair.second ) + { + delete[] pair.second; + pair.second = NULL; + } + } + _outputLayerSymbolicLowerBias.clear(); + + for ( auto &pair : _outputLayerSymbolicUpperBias ) + { + if ( pair.second ) + { + delete[] pair.second; + pair.second = NULL; + } + } + _outputLayerSymbolicUpperBias.clear(); } void NetworkLevelReasoner::storeIntoOther( NetworkLevelReasoner &other ) const @@ -704,6 +793,71 @@ double NetworkLevelReasoner::getPreviousBias( const ReluConstraint *reluConstrai return _previousBiases[reluConstraint]; } + +const double *NetworkLevelReasoner::getOutputLayerSymbolicLb( unsigned layerIndex ) const +{ + // Initialize map if 
empty
+    if ( _outputLayerSymbolicLb.empty() )
+    {
+        const_cast<NetworkLevelReasoner *>( this )->initializeOutputLayerSymbolicBounds();
+    }
+
+    if ( !_outputLayerSymbolicLb.exists( layerIndex ) )
+    {
+        throw NLRError( NLRError::LAYER_NOT_FOUND, "Layer not found in symbolic bounds map." );
+    }
+
+    return _outputLayerSymbolicLb[layerIndex];
+}
+
+const double *NetworkLevelReasoner::getOutputLayerSymbolicUb( unsigned layerIndex ) const
+{
+    // Initialize map if empty
+    if ( _outputLayerSymbolicUb.empty() )
+    {
+        const_cast<NetworkLevelReasoner *>( this )->initializeOutputLayerSymbolicBounds();
+    }
+
+    if ( !_outputLayerSymbolicUb.exists( layerIndex ) )
+    {
+        throw NLRError( NLRError::LAYER_NOT_FOUND, "Layer not found in symbolic bounds map." );
+    }
+
+    return _outputLayerSymbolicUb[layerIndex];
+}
+
+const double *NetworkLevelReasoner::getOutputLayerSymbolicLowerBias( unsigned layerIndex ) const
+{
+    // Initialize map if empty
+    if ( _outputLayerSymbolicLowerBias.empty() )
+    {
+        const_cast<NetworkLevelReasoner *>( this )->initializeOutputLayerSymbolicBounds();
+    }
+
+    if ( !_outputLayerSymbolicLowerBias.exists( layerIndex ) )
+    {
+        throw NLRError( NLRError::LAYER_NOT_FOUND, "Layer not found in symbolic bounds map." );
+    }
+
+    return _outputLayerSymbolicLowerBias[layerIndex];
+}
+
+const double *NetworkLevelReasoner::getOutputLayerSymbolicUpperBias( unsigned layerIndex ) const
+{
+    // Initialize map if empty
+    if ( _outputLayerSymbolicUpperBias.empty() )
+    {
+        const_cast<NetworkLevelReasoner *>( this )->initializeOutputLayerSymbolicBounds();
+    }
+
+    if ( !_outputLayerSymbolicUpperBias.exists( layerIndex ) )
+    {
+        throw NLRError( NLRError::LAYER_NOT_FOUND, "Layer not found in symbolic bounds map." );
+    }
+
+    return _outputLayerSymbolicUpperBias[layerIndex];
+}
+
 unsigned NetworkLevelReasoner::mergeConsecutiveWSLayers( const Map<unsigned, double> &lowerBounds,
                                                          const Map<unsigned, double> &upperBounds,
@@ -788,6 +942,941 @@ bool NetworkLevelReasoner::suitableForMerging(
     return true;
 }
 
+const Vector<double> NetworkLevelReasoner::OptimalParameterisedSymbolicBoundTightening()
+{
+    const Map<unsigned, Layer *> &layers = getLayerIndexToLayer();
+
+    // Search over coeffs in [0, 1]^number_of_parameters with projected gradient descent.
+    unsigned max_iterations =
+        GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS;
+    double step_size = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE;
+    double epsilon = GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS;
+    double weight_decay = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_WEIGHT_DECAY;
+    double lr = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE;
+    unsigned dimension = getNumberOfParameters( layers );
+    bool maximize = false;
+    int sign = ( maximize ? -1 : 1 );
+
+    Vector<double> lower_bounds( dimension, 0 );
+    Vector<double> upper_bounds( dimension, 1 );
+
+    // Initialize the guess uniformly at random within the box.
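+    // In symbols, each iteration estimates the gradient coordinate-wise by a
+    // forward difference and takes a projected step: for every coordinate j,
+    //
+    //     g_j = sign * ( f( x + step_size * e_j ) - f( x ) ) / step_size
+    //           + weight_decay * x_j,
+    //     x_j = clamp( x_j - lr * g_j, lower_bounds[j], upper_bounds[j] ),
+    //
+    // where f is EstimateVolume and e_j is the j-th standard basis vector.
+    // Coordinates whose perturbed candidate leaves the box get g_j = 0.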
+    Vector<double> guess( dimension );
+    std::mt19937_64 rng( GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED );
+    std::uniform_real_distribution<double> dis( 0, 1 );
+    for ( unsigned j = 0; j < dimension; ++j )
+    {
+        double lb = lower_bounds[j];
+        double ub = upper_bounds[j];
+        guess[j] = lb + dis( rng ) * ( ub - lb );
+    }
+
+    Vector<Vector<double>> candidates( dimension );
+    Vector<double> gradient( dimension );
+
+    for ( unsigned i = 0; i < max_iterations; ++i )
+    {
+        double current_cost = EstimateVolume( layers, guess );
+        for ( unsigned j = 0; j < dimension; ++j )
+        {
+            candidates[j] = Vector<double>( guess );
+            candidates[j][j] += step_size;
+
+            if ( candidates[j][j] > upper_bounds[j] || candidates[j][j] < lower_bounds[j] )
+            {
+                gradient[j] = 0;
+                continue;
+            }
+
+            double cost = EstimateVolume( layers, candidates[j] );
+            gradient[j] = sign * ( cost - current_cost ) / step_size + weight_decay * guess[j];
+        }
+
+        bool gradient_is_zero = true;
+        for ( unsigned j = 0; j < dimension; ++j )
+        {
+            if ( FloatUtils::abs( gradient[j] ) > epsilon )
+            {
+                gradient_is_zero = false;
+            }
+        }
+        if ( gradient_is_zero )
+        {
+            break;
+        }
+
+        for ( unsigned j = 0; j < dimension; ++j )
+        {
+            guess[j] -= lr * gradient[j];
+
+            if ( guess[j] > upper_bounds[j] )
+            {
+                guess[j] = upper_bounds[j];
+            }
+
+            if ( guess[j] < lower_bounds[j] )
+            {
+                guess[j] = lower_bounds[j];
+            }
+        }
+    }
+
+    const Vector<double> optimal_coeffs( guess );
+    return optimal_coeffs;
+}
+
+double NetworkLevelReasoner::EstimateVolume( const Map<unsigned, Layer *> &layers,
+                                             const Vector<double> &coeffs )
+{
+    // First, run parameterised symbolic bound propagation.
+    Map<unsigned, Vector<double>> layerIndicesToParameters =
+        getParametersForLayers( layers, coeffs );
+    for ( unsigned i = 0; i < layers.size(); ++i )
+    {
+        ASSERT( layers.exists( i ) );
+        const Vector<double> &currentLayerCoeffs = layerIndicesToParameters[i];
+        layers[i]->computeParameterisedSymbolicBounds( currentLayerCoeffs );
+    }
+
+    std::mt19937_64 rng( GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED );
+    double log_box_volume = 0;
+    double sigmoid_sum = 0;
+
+    unsigned inputLayerIndex = 0;
+    unsigned outputLayerIndex = layers.size() - 1;
+    Layer *inputLayer = layers[inputLayerIndex];
+    Layer *outputLayer = layers[outputLayerIndex];
+
+    // Calculate volume of input variables' bounding box.
+    for ( unsigned index = 0; index < inputLayer->getSize(); ++index )
+    {
+        if ( inputLayer->neuronEliminated( index ) )
+            continue;
+
+        double lb = inputLayer->getLb( index );
+        double ub = inputLayer->getUb( index );
+
+        if ( lb == ub )
+            continue;
+
+        log_box_volume += std::log( ub - lb );
+    }
+
+    for ( unsigned i = 0; i < GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; ++i )
+    {
+        // Sample input point from known bounds.
+        Map<unsigned, double> point;
+        for ( unsigned j = 0; j < inputLayer->getSize(); ++j )
+        {
+            if ( inputLayer->neuronEliminated( j ) )
+            {
+                point.insert( j, 0 );
+            }
+            else
+            {
+                double lb = inputLayer->getLb( j );
+                double ub = inputLayer->getUb( j );
+                std::uniform_real_distribution<> dis( lb, ub );
+                point.insert( j, dis( rng ) );
+            }
+        }
+
+        // Calculate sigmoid of maximum margin from output symbolic bounds.
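+        // ( The estimator is a plain Monte Carlo sum: with N =
+        // VOLUME_ESTIMATION_ITERATIONS samples x_1, ..., x_N drawn uniformly
+        // from the input box B, the function returns
+        //
+        //     vol( B ) * ( 1 / N ) * sum_k sigmoid( max_margin( x_k ) ),
+        //
+        // where max_margin( x ) is the largest difference, over the output
+        // neurons, between the point and its parameterised symbolic bounds,
+        // clamped at zero by the initialization below. The box volume is
+        // accumulated in log space above to avoid overflow for wide boxes. )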
+        double max_margin = 0;
+        for ( unsigned j = 0; j < outputLayer->getSize(); ++j )
+        {
+            if ( outputLayer->neuronEliminated( j ) )
+                continue;
+
+            double margin = outputLayer->calculateDifferenceFromSymbolic( point, j );
+            max_margin = std::max( max_margin, margin );
+        }
+        sigmoid_sum += SigmoidConstraint::sigmoid( max_margin );
+    }
+
+    return std::exp( log_box_volume + std::log( sigmoid_sum ) ) /
+           GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS;
+}
+
+const Vector<PolygonalTightening> NetworkLevelReasoner::OptimizeParameterisedPolygonalTightening()
+{
+    const Map<unsigned, Layer *> &layers = getLayerIndexToLayer();
+    const Vector<PolygonalTightening> &selectedTightenings =
+        generatePolygonalTightenings( layers );
+    const unsigned size = selectedTightenings.size();
+    Vector<PolygonalTightening> optimizedTightenings = Vector<PolygonalTightening>( {} );
+    for ( unsigned i = 0; i < size; ++i )
+    {
+        PolygonalTightening tightening = selectedTightenings[i];
+        double lower_bound = OptimizeSingleParameterisedPolygonalTightening(
+            layers, tightening, optimizedTightenings );
+        tightening._value = lower_bound;
+        optimizedTightenings.append( tightening );
+    }
+    const Vector<PolygonalTightening> tightenings( optimizedTightenings );
+    return tightenings;
+}
+
+double NetworkLevelReasoner::OptimizeSingleParameterisedPolygonalTightening(
+    const Map<unsigned, Layer *> &layers,
+    PolygonalTightening &tightening,
+    Vector<PolygonalTightening> &prevTightenings )
+{
+    // Search over coeffs in [0, 1]^number_of_parameters, gamma in
+    // [0, inf)^size_of_prevTightenings with PGD.
+    unsigned max_iterations = GlobalConfiguration::INVPROP_MAX_ITERATIONS;
+    double step_size = GlobalConfiguration::INVPROP_STEP_SIZE;
+    double epsilon = GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS;
+    double weight_decay = GlobalConfiguration::INVPROP_WEIGHT_DECAY;
+    double lr = GlobalConfiguration::INVPROP_LEARNING_RATE;
+    unsigned coeffs_dimension = getNumberOfParameters( layers );
+    unsigned gamma_dimension = prevTightenings.size();
+    bool maximize = ( tightening._type == PolygonalTightening::LB );
+    double best_bound = 0;
+    int sign = ( maximize ? -1 : 1 );
+
+    Vector<double> coeffs_lower_bounds( coeffs_dimension, 0 );
+    Vector<double> coeffs_upper_bounds( coeffs_dimension, 1 );
+    Vector<double> gamma_lower_bounds( gamma_dimension, 0 );
+
+    Vector<double> coeffs( coeffs_dimension, GlobalConfiguration::INVPROP_INITIAL_ALPHA );
+    Vector<double> gamma( gamma_dimension, GlobalConfiguration::INVPROP_INITIAL_GAMMA );
+
+    Vector<Vector<double>> coeffs_candidates( coeffs_dimension );
+    Vector<double> coeffs_gradient( coeffs_dimension );
+    Vector<Vector<double>> gamma_candidates( gamma_dimension );
+    Vector<double> gamma_gradient( gamma_dimension );
+
+    for ( unsigned i = 0; i < max_iterations; ++i )
+    {
+        double cost = getParameterisdPolygonalTighteningLowerBound(
+            layers, coeffs, gamma, tightening, prevTightenings );
+        for ( unsigned j = 0; j < coeffs_dimension; ++j )
+        {
+            coeffs_candidates[j] = Vector<double>( coeffs );
+            coeffs_candidates[j][j] += step_size;
+
+            if ( coeffs_candidates[j][j] > coeffs_upper_bounds[j] ||
+                 coeffs_candidates[j][j] < coeffs_lower_bounds[j] )
+            {
+                coeffs_gradient[j] = 0;
+                continue;
+            }
+
+            double current_cost = getParameterisdPolygonalTighteningLowerBound(
+                layers, coeffs_candidates[j], gamma, tightening, prevTightenings );
+            // Forward difference: perturbed cost minus current cost.
+            coeffs_gradient[j] =
+                sign * ( current_cost - cost ) / step_size + weight_decay * coeffs[j];
+            best_bound =
+                ( maximize ? std::max( cost, current_cost ) : std::min( cost, current_cost ) );
+        }
+
+        for ( unsigned j = 0; j < gamma_dimension; ++j )
+        {
+            gamma_candidates[j] = Vector<double>( gamma );
+            gamma_candidates[j][j] += step_size;
+
+            if ( gamma_candidates[j][j] < gamma_lower_bounds[j] )
+            {
+                gamma_gradient[j] = 0;
+                continue;
+            }
+
+            double current_cost = getParameterisdPolygonalTighteningLowerBound(
+                layers, coeffs, gamma_candidates[j], tightening, prevTightenings );
+            gamma_gradient[j] =
+                sign * ( current_cost - cost ) / step_size + weight_decay * gamma[j];
+            best_bound =
+                ( maximize ? std::max( cost, current_cost ) : std::min( cost, current_cost ) );
+        }
+
+        bool gradient_is_zero = true;
+        for ( unsigned j = 0; j < coeffs_dimension; ++j )
+        {
+            if ( FloatUtils::abs( coeffs_gradient[j] ) > epsilon )
+            {
+                gradient_is_zero = false;
+            }
+        }
+        for ( unsigned j = 0; j < gamma_dimension; ++j )
+        {
+            if ( FloatUtils::abs( gamma_gradient[j] ) > epsilon )
+            {
+                gradient_is_zero = false;
+            }
+        }
+        if ( gradient_is_zero )
+        {
+            break;
+        }
+
+        for ( unsigned j = 0; j < coeffs_dimension; ++j )
+        {
+            coeffs[j] -= lr * coeffs_gradient[j];
+
+            if ( coeffs[j] > coeffs_upper_bounds[j] )
+            {
+                coeffs[j] = coeffs_upper_bounds[j];
+            }
+
+            if ( coeffs[j] < coeffs_lower_bounds[j] )
+            {
+                coeffs[j] = coeffs_lower_bounds[j];
+            }
+        }
+
+        // Update gamma in its own loop; its dimension generally differs from
+        // the coeffs dimension.
+        for ( unsigned j = 0; j < gamma_dimension; ++j )
+        {
+            gamma[j] -= lr * gamma_gradient[j];
+
+            if ( gamma[j] < gamma_lower_bounds[j] )
+            {
+                gamma[j] = gamma_lower_bounds[j];
+            }
+        }
+    }
+
+    return best_bound;
+}
+
+double NetworkLevelReasoner::getParameterisdPolygonalTighteningLowerBound(
+    const Map<unsigned, Layer *> &layers,
+    const Vector<double> &coeffs,
+    const Vector<double> &gamma,
+    PolygonalTightening &tightening,
+    Vector<PolygonalTightening> &prevTightenings )
+{
+    // First, run parameterised symbolic bound propagation.
+    parameterisedSymbolicBoundPropagation( coeffs );
+
+    const unsigned numLayers = layers.size();
+    const unsigned maxLayer = layers.size() - 1;
+    const unsigned numGamma = gamma.size();
+    const unsigned inputLayerSize = layers[0]->getSize();
+
+    Vector<Vector<double>> mu( numLayers );
+    Vector<Vector<double>> mu_hat( numLayers );
+
+    // The loop includes the input layer ( index 0 ) so that mu_hat[0] is
+    // populated before it is read below; only the mu/tightening update is
+    // skipped for the input layer.
+    for ( unsigned index = maxLayer;; --index )
+    {
+        Layer *layer = layers[index];
+        const unsigned layerSize = layer->getSize();
+        const unsigned layerIndex = layer->getLayerIndex();
+
+        mu[index] = Vector<double>( layerSize, 0 );
+        mu_hat[index] = Vector<double>( layerSize, 0 );
+
+        if ( layerIndex < maxLayer )
+        {
+            for ( const unsigned successorLayerIndex : layer->getSuccessorLayers() )
+            {
+                const Layer *successorLayer = getLayer( successorLayerIndex );
+                const unsigned successorLayerSize = successorLayer->getSize();
+                const Layer::Type successorLayerType = successorLayer->getLayerType();
+
+                if ( successorLayerType == Layer::WEIGHTED_SUM )
+                {
+                    // The successor's weights are keyed by the source layer.
+                    const double *successorWeights = successorLayer->getWeights( layerIndex );
+                    for ( unsigned i = 0; i < layerSize; ++i )
+                    {
+                        for ( unsigned j = 0; j < successorLayerSize; ++j )
+                        {
+                            if ( !successorLayer->neuronEliminated( j ) )
+                            {
+                                mu_hat[index][i] += mu[successorLayerIndex][j] *
+                                                    successorWeights[i * successorLayerSize + j];
+                            }
+                        }
+                    }
+                }
+
+                else
+                {
+                    // const double *successorSymbolicLbInTermsOfPredecessor =
+                    //     successorLayer->getSymbolicLbInTermsOfPredecessor();
+                    // const double *successorSymbolicUbInTermsOfPredecessor =
+                    //     successorLayer->getSymbolicUbInTermsOfPredecessor();
+                    /*const double *successorSymbolicLbInTermsOfPredecessor =
+                        successorLayer->getSymbolicLb();
+                    const double *successorSymbolicUbInTermsOfPredecessor =
+                        successorLayer->getSymbolicUb();
+                    for ( unsigned i = 0; i < layerSize; ++i )
+                    {
+                        for ( unsigned j = 0; j < successorLayerSize; ++j )
+                        {
+                            if ( !successorLayer->neuronEliminated( j ) )
+                            {
+                                if ( mu[successorLayerIndex][j] >= 0 )
+                                {
+                                    mu_hat[index][i] += mu[successorLayerIndex][j] *
+                                                        successorSymbolicUbInTermsOfPredecessor
+                                                            [i * successorLayerSize + j];
+                                }
+                                else
+                                {
+                                    mu_hat[index][i] -= mu[successorLayerIndex][j] *
+                                                        successorSymbolicLbInTermsOfPredecessor
+                                                            [i * successorLayerSize + j];
+                                }
+                            }
+                        }
+                    }*/
+                }
+            }
+        }
+
+        if ( index == 0 )
+            break;
+
+        mu[index] += mu_hat[index];
+        for ( unsigned i = 0; i < layerSize; ++i )
+        {
+            mu_hat[index][i] -= tightening.getCoeff( NeuronIndex( index, i ) );
+            // prevTightenings has numGamma entries, one per multiplier.
+            for ( unsigned j = 0; j < numGamma; ++j )
+            {
+                const PolygonalTightening pt = prevTightenings[j];
+                double prevCoeff = pt.getCoeff( NeuronIndex( index, i ) );
+                if ( pt._type == PolygonalTightening::UB )
+                {
+                    mu_hat[index][i] -= gamma[j] * prevCoeff;
+                }
+                else
+                {
+                    mu_hat[index][i] += gamma[j] * prevCoeff;
+                }
+            }
+        }
+    }
+
+    Vector<double> globalBoundVector( inputLayerSize, 0 );
+    for ( unsigned i = 0; i < inputLayerSize; ++i )
+    {
+        globalBoundVector[i] += tightening.getCoeff( NeuronIndex( 0, i ) ) - mu_hat[0][i];
+        for ( unsigned j = 0; j < numGamma; ++j )
+        {
+            const PolygonalTightening pt = prevTightenings[j];
+            double prevCoeff = pt.getCoeff( NeuronIndex( 0, i ) );
+            if ( pt._type == PolygonalTightening::UB )
+            {
+                globalBoundVector[i] += gamma[j] * prevCoeff;
+            }
+            else
+            {
+                globalBoundVector[i] -= gamma[j] * prevCoeff;
+            }
+        }
+    }
+
+    double lowerBound = 0;
+    for ( unsigned i = 0; i < numGamma; ++i )
+    {
+        const PolygonalTightening pt = prevTightenings[i];
+        if ( pt._type == PolygonalTightening::UB )
+        {
+            lowerBound += gamma[i] * pt._value;
+        }
+        else
+        {
+            lowerBound -= gamma[i] * pt._value;
+        }
+    }
+
+    for ( unsigned index = maxLayer; index > 0; --index )
+    {
+        Layer *layer = layers[index];
+        const unsigned layerSize = layer->getSize();
+
+        if ( layer->getLayerType() == Layer::WEIGHTED_SUM )
+        {
+            const double *biases = layer->getBiases();
+            for ( unsigned i = 0; i < layerSize; ++i )
+            {
+                if ( !layer->neuronEliminated( i ) )
+                {
+                    lowerBound -= mu[index][i] * biases[i];
+                }
+                else
+                {
+                    lowerBound -= mu[index][i] * layer->getEliminatedNeuronValue( i );
+                }
+            }
+        }
+        else
+        {
+            /* // const double *successorSymbolicLowerBiasInTermsOfPredecessor =
+            //     successorLayer->getSymbolicLowerBiasInTermsOfPredecessor();
+            // const double *successorSymbolicUpperBiasInTermsOfPredecessor =
+            //     successorLayer->getSymbolicUpperBiasInTermsOfPredecessor();
+            const double *successorSymbolicLowerBiasInTermsOfPredecessor =
+                layer->getSymbolicLowerBias();
+            const double *successorSymbolicUpperBiasInTermsOfPredecessor =
+                layer->getSymbolicUpperBias();
+            for ( unsigned i = 0; i < layerSize; ++i )
+            {
+                if ( !layer->neuronEliminated( i ) )
+                {
+                    if ( mu[index][i] > 0 )
+                    {
+                        lowerBound -=
+                            mu[index][i] * successorSymbolicUpperBiasInTermsOfPredecessor[i];
+                    }
+                    else
+                    {
+                        lowerBound +=
+                            mu[index][i] * successorSymbolicLowerBiasInTermsOfPredecessor[i];
+                    }
+                }
+                else
+                {
+                    lowerBound +=
+                        FloatUtils::abs( mu[index][i] ) * layer->getEliminatedNeuronValue( i );
+                }
+            }*/
+        }
+    }
+
+    Layer *inputLayer = layers[0];
+    const double *inputLbs = inputLayer->getLbs();
+    const double *inputUbs = inputLayer->getUbs();
+    for ( unsigned i = 0; i < inputLayerSize; ++i )
+    {
+        if ( globalBoundVector[i] > 0 )
+        {
+            lowerBound += globalBoundVector[i] * inputUbs[i];
+        }
+        else
+        {
+            lowerBound += globalBoundVector[i] * inputLbs[i];
+        }
+    }
+    return lowerBound;
+}
+
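+// A sketch of the bound computed above ( informal, my reading of the code,
+// not a proof ): propagating the dual vectors mu backwards through the
+// weighted-sum layers folds every hidden neuron's coefficient into its
+// predecessors via W^T mu; whatever remains is paid for directly. Biases
+// contribute -mu . b per weighted-sum layer, previously optimized
+// tightenings contribute +/- gamma_j * value_j, and the leftover input-layer
+// coefficients are bounded by the best corner of the input box: ub_i where
+// the coefficient is positive, lb_i where it is negative.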
+const Vector<PolygonalTightening>
+NetworkLevelReasoner::generatePolygonalTightenings( const Map<unsigned, Layer *> &layers ) const
+{
+    /*Vector<PolygonalTightening> tightenings = Vector<PolygonalTightening>( {} );
+    if ( Options::get()->getMILPSolverBoundTighteningType() ==
+             MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM ||
+         Options::get()->getMILPSolverBoundTighteningType() ==
+             MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT ||
+         Options::get()->getMILPSolverBoundTighteningType() ==
+             MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS )
+    {
+        const List<NeuronIndex> constraints = selectConstraints( layers );
+        for ( const auto &pair : constraints )
+        {
+            unsigned layerIndex = constraints._layer;
+            unsigned neuron = constraints._neuron;
+            Layer *layer = layers[layerIndex];
+
+            Map<NeuronIndex, double> neuronToCoefficient = Map<NeuronIndex, double>( {} );
+
+            if ( layer->getLayerType() == NetworkLevelReasoner::WEIGHTED_SUM )
+            {
+                const double *biases = layer->getBiases();
+                for ( unsigned i = 0; i < layerSize; ++i )
+                {
+                    if ( !layer->neuronEliminated( i ) )
+                    {
+                        lowerBound -= mu[index][i] * biases[i];
+                    }
+                    else
+                    {
+                        lowerBound -= mu[index][i] * layer->getEliminatedNeuronValue[i];
+                    }
+                }
+            }
+            else
+            {
+                const double *successorSymbolicLowerBiasInTermsOfPredecessor =
+                    successorLayer->getSymbolicLbInTermsOfPredecessor();
+                const double *successorSymbolicUpperBiasInTermsOfPredecessor =
+                    successorLayer->getSymbolicUbInTermsOfPredecessor();
+                for ( unsigned i = 0; i < layerSize; ++i )
+                {
+                    if ( !layer->neuronEliminated( i ) )
+                    {
+                        if ( mu[index][i] > 0 )
+                        {
+                            lowerBound -= mu[index][i] *
+                                          successorSymbolicUpperBiasInTermsOfPredecessor[i];
+                        }
+                        else
+                        {
+                            lowerBound += mu[index][i] *
+                                          successorSymbolicLowerBiasInTermsOfPredecessor[i];
+                        }
+                    }
+                    else
+                    {
+                        lowerBound += FloatUtils::abs( mu[index][i] ) *
+                                      layer->getEliminatedNeuronValue[i];
+                    }
+                }
+            }
+        }
+
+        else if ( Options::get()->getMILPSolverBoundTighteningType() ==
+                  MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP )
+        {
+
+        }*/
+    // Silence the unused-parameter warning while the implementation above is
+    // commented out.
+    (void)layers;
+    const Vector<PolygonalTightening> defaultVector = Vector<PolygonalTightening>( {} );
+    return defaultVector;
+}
+
+const List<NeuronIndex>
+NetworkLevelReasoner::selectConstraints( const Map<unsigned, Layer *> &layers ) const
+{
+    List<NeuronIndex> neuronList = List<NeuronIndex>( {} );
+
+    switch ( Options::get()->getMILPSolverBoundTighteningType() )
+    {
+    case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM:
+    {
+        unsigned numNonlinear = countNonlinearLayers( layers );
+        if ( numNonlinear == 0 )
+            break;
+        std::mt19937_64 rng( GlobalConfiguration::PMNR_RANDOM_SEED );
+        std::uniform_int_distribution<unsigned> dis_layer( 0, numNonlinear - 1 );
+        unsigned nonlinearIndex = dis_layer( rng );
+
+        // Walk the layers until the nonlinearIndex-th nonlinear layer is
+        // reached.
+        unsigned index = 0;
+        unsigned nonlinearSeen = 0;
+        while ( true )
+        {
+            Layer::Type type = _layerIndexToLayer[index]->getLayerType();
+
+            if ( type != Layer::WEIGHTED_SUM )
+            {
+                if ( nonlinearSeen == nonlinearIndex )
+                    break;
+                ++nonlinearSeen;
+            }
+            ++index;
+        }
+
+        Layer *layer = _layerIndexToLayer[index];
+        unsigned layerSize = layer->getSize();
+        std::uniform_int_distribution<unsigned> dis_neuron( 0, layerSize - 1 );
+        for ( unsigned i = 0; i < GlobalConfiguration::PMNR_SELECTED_NEURONS; ++i )
+        {
+            unsigned neuron = dis_neuron( rng );
+            while ( layer->neuronEliminated( neuron ) )
+            {
+                neuron = dis_neuron( rng );
+            }
+            neuronList.append( NeuronIndex( index, neuron ) );
+        }
+        break;
+    }
+
+    case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT:
+    {
+        double maxScore = 0;
+        unsigned maxScoreIndex = 0;
+        Map<NeuronIndex, double> neuronIndexToScore;
+        for ( const auto &pair : _layerIndexToLayer )
+        {
+            double score = 0;
+            unsigned index = pair.first;
+            Layer *layer = pair.second;
+            unsigned layerSize = layer->getSize();
+
+            if ( layer->getLayerType() == Layer::WEIGHTED_SUM )
+            {
+                continue;
+            }
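+            // ( Scoring sketch, as I read the heuristic: every neuron gets
+            // score( i ) = ( ( lb_i + ub_i ) / 2 )^2, the squared midpoint of
+            // its output-layer symbolic bound entries, as a proxy for its
+            // influence on the output. The layer with the largest total score
+            // wins, and its PMNR_SELECTED_NEURONS highest-scoring neurons are
+            // selected through the priority queue below. )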
+ + for ( unsigned i = 0; i < layerSize; ++i ) + { + if ( !( layer->neuronEliminated( i ) ) ) + { + double neuronScore = std::pow( + ( _outputLayerSymbolicLb[index][i] + _outputLayerSymbolicUb[index][i] ) / + 2.0, + 2 ); + neuronIndexToScore.insert( NeuronIndex( index, i ), neuronScore ); + score += neuronScore; + } + } + + if ( score > maxScore ) + { + maxScore = score; + maxScoreIndex = index; + } + } + + Layer *layer = _layerIndexToLayer[maxScoreIndex]; + unsigned layerSize = layer->getSize(); + std::priority_queue, + std::vector>, + std::less>> + max_priority_queue; + for ( unsigned i = 0; i < layerSize; ++i ) + { + if ( !( layer->neuronEliminated( i ) ) ) + { + double neuronScore = neuronIndexToScore[NeuronIndex( maxScoreIndex, i )]; + max_priority_queue.push( std::pair( neuronScore, i ) ); + } + } + + for ( unsigned i = 0; i < GlobalConfiguration::PMNR_SELECTED_NEURONS; ++i ) + { + unsigned neuron = max_priority_queue.top().second; + neuronList.append( NeuronIndex( maxScoreIndex, neuron ) ); + max_priority_queue.pop(); + } + break; + } + + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS: + { + double maxScore = 0; + unsigned maxScoreIndex = 0; + Map neuronIndexToScore; + for ( const auto &pair : _layerIndexToLayer ) + { + double score = 0; + unsigned index = pair.first; + Layer *layer = pair.second; + // unsigned layerSize = layer->getSize(); + + if ( layer->getLayerType() == Layer::WEIGHTED_SUM ) + { + continue; + } + + /*double *Lbs = layer->getLbs(); + double *Ubs = layer->getUbs(); + double *symbolicLb = layer->getSymbolicLb(); + double *symbolicUb = layer->getSymbolicUb(); + double *symbolicLowerBias = layer->getSymbolicLowerBias(); + double *symbolicUpperBias = layer->getSymbolicUpperBias(); + for ( unsigned i = 0; i < layerSize; ++i ) + { + if + ( !( layer->neuronEliminated( i ) ) ) + { + double neuronScore = _outputLayerSymbolicLowerBias[i]; + if ( _outputLayerSymbolicLb[index][i] > 0 ) + { + neuronScore += std::max( 0, _outputLayerSymbolicLb[index][i] ) * + symbolicLb[i] + std::min( 0, _outputLayerSymbolicLb[index][i] ) * + symbolicUb[i] + } + + std::pow( ( _outputLayerSymbolicLb[index][i] + + _outputLayerSymbolicUb[index][i] ) / 2.0, 2 ); neuronIndexToScore.insert( NeuronIndex( + index, i ), neuronScore ); score += neuronScore; + } + }*/ + + if ( score > maxScore ) + { + maxScore = score; + maxScoreIndex = index; + } + } + + Layer *layer = _layerIndexToLayer[maxScoreIndex]; + unsigned layerSize = layer->getSize(); + std::priority_queue, + std::vector>, + std::less>> + max_priority_queue; + for ( unsigned i = 0; i < layerSize; ++i ) + { + if ( !( layer->neuronEliminated( i ) ) ) + { + double neuronScore = neuronIndexToScore[NeuronIndex( maxScoreIndex, i )]; + max_priority_queue.push( std::pair( neuronScore, i ) ); + } + } + + for ( unsigned i = 0; i < GlobalConfiguration::PMNR_SELECTED_NEURONS; ++i ) + { + unsigned neuron = max_priority_queue.top().second; + neuronList.append( NeuronIndex( maxScoreIndex, neuron ) ); + max_priority_queue.pop(); + } + break; + } + + default: + break; + } + + const List list( neuronList ); + return list; +} + +const Map> +NetworkLevelReasoner::getParametersForLayers( const Map &layers, + const Vector &coeffs ) const +{ + unsigned index = 0; + Map> layerIndicesToParameters; + for ( auto pair : layers ) + { + unsigned layerIndex = pair.first; + Layer *layer = layers[layerIndex]; + unsigned n_coeffs = getNumberOfParametersPerType( layer->getLayerType() ); + Vector current_coeffs( n_coeffs ); + for ( unsigned i = 0; i < n_coeffs; 
++i ) + { + current_coeffs[i] = coeffs[index + i]; + } + layerIndicesToParameters.insert( layerIndex, current_coeffs ); + index += n_coeffs; + } + const Map> parametersForLayers( layerIndicesToParameters ); + return parametersForLayers; +} + +unsigned NetworkLevelReasoner::getNumberOfParameters( const Map &layers ) const +{ + unsigned num = 0; + for ( auto pair : layers ) + { + unsigned layerIndex = pair.first; + Layer *layer = layers[layerIndex]; + num += getNumberOfParametersPerType( layer->getLayerType() ); + } + return num; +} + +unsigned NetworkLevelReasoner::getNumberOfParametersPerType( Layer::Type t ) const +{ + if ( t == Layer::RELU || t == Layer::LEAKY_RELU ) + return 1; + + if ( t == Layer::SIGN || t == Layer::BILINEAR ) + return 2; + + return 0; +} + +unsigned NetworkLevelReasoner::countNonlinearLayers( const Map &layers ) const +{ + unsigned num = 0; + for ( auto pair : layers ) + { + unsigned layerIndex = pair.first; + Layer *layer = layers[layerIndex]; + num += layer->getLayerType() != Layer::WEIGHTED_SUM ? 1 : 0; + } + return num; +} + +void NetworkLevelReasoner::initializeOutputLayerSymbolicBounds() +{ + freeMemoryForOutputLayerSBTIfNeeded(); + + // Temporarily add weighted sum layer to the NLR of the same size of the output layer. + Layer *outputLayer = _layerIndexToLayer[getNumberOfLayers() - 1]; + unsigned outputLayerIndex = outputLayer->getLayerIndex(); + unsigned outputLayerSize = outputLayer->getSize(); + unsigned newLayerIndex = outputLayerIndex + 1; + + addLayer( newLayerIndex, Layer::WEIGHTED_SUM, outputLayerSize ); + addLayerDependency( outputLayerIndex, newLayerIndex ); + Layer *newLayer = _layerIndexToLayer[newLayerIndex]; + + for ( unsigned i = 0; i < outputLayerSize; ++i ) + { + setWeight( outputLayerIndex, i, newLayerIndex, i, 1 ); + newLayer->setLb( i, FloatUtils::infinity() ); + newLayer->setUb( i, FloatUtils::negativeInfinity() ); + } + + // Populate symbolic bounds maps via DeepPoly. + allocateMemoryForOutputLayerSBTIfNeeded(); + + if ( _deepPolyAnalysis == nullptr ) + _deepPolyAnalysis = std::unique_ptr( + new DeepPolyAnalysis( this, + true, + &_outputLayerSymbolicLb, + &_outputLayerSymbolicUb, + &_outputLayerSymbolicLowerBias, + &_outputLayerSymbolicUpperBias ) ); + _deepPolyAnalysis->run(); + + // Remove new weighted sum layer. 
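+    // ( The extra layer was only scaffolding: appending an identity
+    // weighted-sum layer lets the DeepPoly run above express the output
+    // layer's bounds in terms of every preceding layer, and detaching it
+    // below restores the original topology. )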
+ removeLayerDependency( outputLayerIndex, newLayerIndex ); + _layerIndexToLayer.erase( newLayerIndex ); + if ( newLayer ) + { + delete newLayer; + newLayer = NULL; + } +} + +void NetworkLevelReasoner::allocateMemoryForOutputLayerSBTIfNeeded() +{ + unsigned outputLayerSize = _layerIndexToLayer[getNumberOfLayers() - 1]->getSize(); + + for ( const auto &pair : _layerIndexToLayer ) + { + unsigned layerIndex = pair.first; + Layer *layer = pair.second; + unsigned layerSize = layer->getSize(); + + double *currentSymbolicLb = new double[layerSize * outputLayerSize]; + double *currentSymbolicUb = new double[layerSize * outputLayerSize]; + double *currentSymbolicLowerBias = new double[outputLayerSize]; + double *currentSymbolicUpperBias = new double[outputLayerSize]; + + std::fill_n( currentSymbolicLb, layerSize * outputLayerSize, 0 ); + std::fill_n( currentSymbolicUb, layerSize * outputLayerSize, 0 ); + std::fill_n( currentSymbolicLowerBias, outputLayerSize, 0 ); + std::fill_n( currentSymbolicUpperBias, outputLayerSize, 0 ); + + _outputLayerSymbolicLb.insert( layerIndex, currentSymbolicLb ); + _outputLayerSymbolicUb.insert( layerIndex, currentSymbolicUb ); + _outputLayerSymbolicLowerBias.insert( layerIndex, currentSymbolicLowerBias ); + _outputLayerSymbolicUpperBias.insert( layerIndex, currentSymbolicUpperBias ); + } +} + +void NetworkLevelReasoner::freeMemoryForOutputLayerSBTIfNeeded() +{ + for ( auto &pair : _outputLayerSymbolicLb ) + { + if ( pair.second ) + { + delete[] pair.second; + pair.second = NULL; + } + } + _outputLayerSymbolicLb.clear(); + + for ( auto &pair : _outputLayerSymbolicUb ) + { + if ( pair.second ) + { + delete[] pair.second; + pair.second = NULL; + } + } + _outputLayerSymbolicUb.clear(); + + for ( auto &pair : _outputLayerSymbolicLowerBias ) + { + if ( pair.second ) + { + delete[] pair.second; + pair.second = NULL; + } + } + _outputLayerSymbolicLowerBias.clear(); + + for ( auto &pair : _outputLayerSymbolicUpperBias ) + { + if ( pair.second ) + { + delete[] pair.second; + pair.second = NULL; + } + } + _outputLayerSymbolicUpperBias.clear(); +} void NetworkLevelReasoner::mergeWSLayers( unsigned secondLayerIndex, Map &eliminatedNeurons ) diff --git a/src/nlr/NetworkLevelReasoner.h b/src/nlr/NetworkLevelReasoner.h index 0e62a9b027..045b61300c 100644 --- a/src/nlr/NetworkLevelReasoner.h +++ b/src/nlr/NetworkLevelReasoner.h @@ -18,6 +18,7 @@ #include "DeepPolyAnalysis.h" #include "ITableau.h" +#include "LPFormulator.h" #include "Layer.h" #include "LayerOwner.h" #include "Map.h" @@ -50,6 +51,7 @@ class NetworkLevelReasoner : public LayerOwner */ void addLayer( unsigned layerIndex, Layer::Type type, unsigned layerSize ); void addLayerDependency( unsigned sourceLayer, unsigned targetLayer ); + void removeLayerDependency( unsigned sourceLayer, unsigned targetLayer ); void computeSuccessorLayers(); void setWeight( unsigned sourceLayer, unsigned sourceNeuron, @@ -145,6 +147,15 @@ class NetworkLevelReasoner : public LayerOwner void MILPTighteningForOneLayer( unsigned targetIndex ); void iterativePropagation(); + + void initializeOutputLayerSymbolicBounds(); + + // Return optimizable parameters which minimize parameterised SBT bounds' volume. + const Vector OptimalParameterisedSymbolicBoundTightening(); + + // Optimize biases of generated parameterised polygonal tightenings. 
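+    // ( A polygonal tightening is read here as a linear constraint over
+    // neuron values, sum_i c_i * x_i >= _value for LB or <= _value for UB;
+    // the pass tightens each constraint's bias by projected gradient descent
+    // over the relaxation parameters, with multipliers ( gamma ) attached to
+    // the previously optimized tightenings, in the style of INVPROP. )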
+ const Vector OptimizeParameterisedPolygonalTightening(); + void receiveTighterBound( Tightening tightening ); void getConstraintTightenings( List &tightenings ); void clearConstraintTightenings(); @@ -197,6 +208,12 @@ class NetworkLevelReasoner : public LayerOwner */ double getPreviousBias( const ReluConstraint *reluConstraint ) const; + + const double *getOutputLayerSymbolicLb( unsigned layerIndex ) const; + const double *getOutputLayerSymbolicUb( unsigned layerIndex ) const; + const double *getOutputLayerSymbolicLowerBias( unsigned layerIndex ) const; + const double *getOutputLayerSymbolicUpperBias( unsigned layerIndex ) const; + /* Finds logically consecutive WS layers and merges them, in order to reduce the total number of layers and variables in the @@ -231,6 +248,10 @@ class NetworkLevelReasoner : public LayerOwner void freeMemoryIfNeeded(); + // Manage memory for symbolic bounds maps. + void allocateMemoryForOutputLayerSBTIfNeeded(); + void freeMemoryForOutputLayerSBTIfNeeded(); + List _constraintsInTopologicalOrder; // Map each neuron to a linear expression representing its weighted sum @@ -261,6 +282,51 @@ class NetworkLevelReasoner : public LayerOwner unsigned outputDimension ); void reduceLayerIndex( unsigned layer, unsigned startIndex ); + void optimizeBoundsWithPreimageApproximation( Map &layers, + LPFormulator &lpFormulator ); + + // Estimate Volume of parameterised symbolic bound tightening. + double EstimateVolume( const Map &layers, const Vector &coeffs ); + + void optimizeBoundsWithInvprop( Map &layers, LPFormulator &lpFormulator ); + + void optimizeBoundsWithPMNR( Map &layers, LPFormulator &lpFormulator ); + + // Optimize biases of generated parameterised polygonal tightenings. + double + OptimizeSingleParameterisedPolygonalTightening( const Map &layers, + PolygonalTightening &tightening, + Vector &prevTightenings ); + + // Get current lower bound for selected parameterised polygonal tightenings' biases. + double + getParameterisdPolygonalTighteningLowerBound( const Map &layers, + const Vector &coeffs, + const Vector &gamma, + PolygonalTightening &tightening, + Vector &prevTightenings ); + + const Vector + generatePolygonalTightenings( const Map &layers ) const; + + // Heuristically select neurons and polygonal tightenings for PMNR. + const List selectConstraints( const Map &layers ) const; + + // Get map containing vector of optimizable parameters for parameterised SBT relaxation for + // every layer index. + const Map> + getParametersForLayers( const Map &layers, + const Vector &coeffs ) const; + + // Get total number of optimizable parameters for parameterised SBT relaxation. + unsigned getNumberOfParameters( const Map &layers ) const; + + // Get number of optimizable parameters for parameterised SBT relaxation per layer type. + unsigned getNumberOfParametersPerType( Layer::Type t ) const; + + // Get total number of non-weighted sum layers for INVPROP. 
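+    // ( "Nonlinear" here means any layer whose type is not WEIGHTED_SUM; the
+    // count is used by the PMNR_RANDOM constraint selection to sample one of
+    // the nonlinear layers uniformly. )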
+ unsigned countNonlinearLayers( const Map &layers ) const; + /* Store previous biases for each ReLU neuron in a map for getPreviousBias() and BaBSR heuristic @@ -274,6 +340,11 @@ class NetworkLevelReasoner : public LayerOwner to all neurons in the network */ void reindexNeurons(); + + Map _outputLayerSymbolicLb; + Map _outputLayerSymbolicUb; + Map _outputLayerSymbolicLowerBias; + Map _outputLayerSymbolicUpperBias; }; } // namespace NLR diff --git a/src/nlr/tests/Test_DeepPolyAnalysis.h b/src/nlr/tests/Test_DeepPolyAnalysis.h index 14f39a3d40..77c4d11f1a 100644 --- a/src/nlr/tests/Test_DeepPolyAnalysis.h +++ b/src/nlr/tests/Test_DeepPolyAnalysis.h @@ -14,7 +14,6 @@ **/ #include "../../engine/tests/MockTableau.h" -#include "DeepPolySoftmaxElement.h" #include "FloatUtils.h" #include "InputQuery.h" #include "Layer.h" @@ -291,7 +290,7 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + TS_ASSERT_THROWS_NOTHING( nlr.initializeOutputLayerSymbolicBounds() ); /* Input ranges: @@ -1392,22 +1391,22 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite Vector inputUb = { 0, 2, 4 }; Vector input = { -0.5, 1, 2.5 }; - double value = NLR::DeepPolySoftmaxElement::ERLowerBound( input, inputLb, inputUb, 0 ); + double value = NLR::Layer::ERLowerBound( input, inputLb, inputUb, 0 ); TS_ASSERT( FloatUtils::areEqual( value, 0.0114799, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dERLowerBound( input, inputLb, inputUb, 0, 0 ); + value = NLR::Layer::dERLowerBound( input, inputLb, inputUb, 0, 0 ); TS_ASSERT( FloatUtils::areEqual( value, 0.00563867, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dERLowerBound( input, inputLb, inputUb, 0, 1 ); + value = NLR::Layer::dERLowerBound( input, inputLb, inputUb, 0, 1 ); TS_ASSERT( FloatUtils::areEqual( value, -0.000838421, 0.00001 ) ); Vector outputLb = { 0.2, 0, 0 }; Vector outputUb = { 0.4, 0.1, 0.1 }; - value = NLR::DeepPolySoftmaxElement::ERUpperBound( input, outputLb, outputUb, 0 ); + value = NLR::Layer::ERUpperBound( input, outputLb, outputUb, 0 ); TS_ASSERT( FloatUtils::areEqual( value, -1.44538, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dERUpperBound( input, outputLb, outputUb, 0, 0 ); + value = NLR::Layer::dERUpperBound( input, outputLb, outputUb, 0, 0 ); TS_ASSERT( FloatUtils::areEqual( value, 1.96538, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dERUpperBound( input, outputLb, outputUb, 0, 1 ); + value = NLR::Layer::dERUpperBound( input, outputLb, outputUb, 0, 1 ); TS_ASSERT( FloatUtils::areEqual( value, -0.358535, 0.00001 ) ); } @@ -1416,20 +1415,20 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite Vector inputLb = { -1, 0, 1 }; Vector inputUb = { 0, 2, 3 }; Vector input = { -0.5, 1, 2 }; - double value = NLR::DeepPolySoftmaxElement::LSELowerBound( input, inputLb, inputUb, 0 ); + double value = NLR::Layer::LSELowerBound( input, inputLb, inputUb, 0 ); TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) ); - value = NLR::DeepPolySoftmaxElement::dLSELowerBound( input, inputLb, inputUb, 0, 0 ); + value = NLR::Layer::dLSELowerBound( input, inputLb, inputUb, 0, 0 ); TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) ); - value = NLR::DeepPolySoftmaxElement::dLSELowerBound( input, inputLb, inputUb, 0, 1 ); + value = NLR::Layer::dLSELowerBound( input, inputLb, inputUb, 0, 1 ); TS_ASSERT( FloatUtils::areEqual( value, -0.00703444, 0.001 ) ); Vector outputLb = { 0.2, 0, 0 
}; Vector outputUb = { 0.4, 0.1, 0.1 }; - value = NLR::DeepPolySoftmaxElement::LSEUpperBound( input, outputLb, outputUb, 0 ); + value = NLR::Layer::LSEUpperBound( input, outputLb, outputUb, 0 ); TS_ASSERT( FloatUtils::areEqual( value, -0.164165, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dLSEUpperbound( input, outputLb, outputUb, 0, 0 ); + value = NLR::Layer::dLSEUpperbound( input, outputLb, outputUb, 0, 0 ); TS_ASSERT( FloatUtils::areEqual( value, 0.272204, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dLSEUpperbound( input, outputLb, outputUb, 0, 1 ); + value = NLR::Layer::dLSEUpperbound( input, outputLb, outputUb, 0, 1 ); TS_ASSERT( FloatUtils::areEqual( value, -0.073207, 0.00001 ) ); } diff --git a/src/nlr/tests/Test_LPRelaxation.h b/src/nlr/tests/Test_LPRelaxation.h index 654d5c1af0..49dbd06135 100644 --- a/src/nlr/tests/Test_LPRelaxation.h +++ b/src/nlr/tests/Test_LPRelaxation.h @@ -73,9 +73,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addLayer( 4, NLR::Layer::RELU, 2 ); nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.getLayer( 2 )->setAlpha( 0.1 ); - nlr.getLayer( 4 )->setAlpha( 0.1 ); - // Mark layer dependencies for ( unsigned i = 1; i <= 5; ++i ) nlr.addLayerDependency( i - 1, i ); @@ -4536,6 +4533,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds2( { + Tightening( 8, 0, Tightening::LB ), + Tightening( 10, 0, Tightening::UB ), Tightening( 11, 1.625, Tightening::LB ), } ); @@ -4605,6 +4604,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds4( { + Tightening( 8, 0, Tightening::LB ), + Tightening( 9, 0, Tightening::LB ), + Tightening( 11, 0.8472, Tightening::LB ), } ); @@ -4679,9 +4681,17 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 13, -4.25, Tightening::LB ), Tightening( 13, 3.25, Tightening::UB ), + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + Tightening( 16, 3.25, Tightening::UB ), + Tightening( 17, -11.1417, Tightening::LB ), Tightening( 17, 10, Tightening::UB ), + Tightening( 19, 0, Tightening::LB ), + Tightening( 19, 10, Tightening::UB ), + Tightening( 21, -17.3084, Tightening::LB ), Tightening( 21, 3.2160, Tightening::UB ), } ); @@ -4782,15 +4792,25 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + Tightening( 11, -5, Tightening::LB ), Tightening( 12, -4.6429, Tightening::LB ), Tightening( 13, 8.5519, Tightening::UB ), + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + Tightening( 16, 8.5519, Tightening::UB ), + Tightening( 17, -23.6231, Tightening::LB ), Tightening( 17, 14.0909, Tightening::UB ), Tightening( 18, 2, Tightening::LB ), Tightening( 18, 28.2015, Tightening::UB ), + Tightening( 20, 2, Tightening::LB ), + Tightening( 20, 28.2015, Tightening::UB ), + Tightening( 21, -29.2015, Tightening::LB ), Tightening( 21, 6.5734, Tightening::UB ), } ); @@ -4849,13 +4869,17 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds2( { + Tightening( 4, -0.1, Tightening::LB ), + Tightening( 7, -2, Tightening::LB ), + Tightening( 8, -0.045, 
Tightening::LB ), + Tightening( 9, -0.2, Tightening::LB ), + Tightening( 10, -1.8, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), Tightening( 11, 1.4542, Tightening::LB ), - Tightening( 11, 1.4542, Tightening::LB ), } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); @@ -4923,13 +4947,18 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds4( { + Tightening( 4, -0.5, Tightening::LB ), + Tightening( 5, -0.4, Tightening::LB ), + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3.1, Tightening::UB ), Tightening( 7, -3.2, Tightening::LB ), Tightening( 7, 4, Tightening::UB ), + Tightening( 8, -0.2, Tightening::LB ), Tightening( 8, 3.1, Tightening::UB ), Tightening( 9, -0.32, Tightening::LB ), + Tightening( 9, 4, Tightening::UB ), Tightening( 10, -3.8726, Tightening::LB ), Tightening( 10, 0.03, Tightening::UB ), @@ -5004,28 +5033,24 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds2( { - Tightening( 11, -4.5, Tightening::LB ), - Tightening( 11, 8.5, Tightening::UB ), - Tightening( 12, -2.225, Tightening::LB ), - Tightening( 13, 2.975, Tightening::UB ), + Tightening( 7, -0.2, Tightening::LB ), Tightening( 8, -0.3, Tightening::LB ), + Tightening( 9, -0.3, Tightening::LB ), Tightening( 10, -0.6, Tightening::LB ), + + Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 8.5, Tightening::UB ), + Tightening( 12, -2.225, Tightening::LB ), Tightening( 13, 2.975, Tightening::UB ), Tightening( 13, -4.175, Tightening::LB ), - Tightening( 15, -0.2225, Tightening::LB ), - Tightening( 16, 2.975, Tightening::UB ), + Tightening( 14, -0.45, Tightening::LB ), Tightening( 14, 8.5, Tightening::UB ), + Tightening( 15, -0.2225, Tightening::LB ), Tightening( 16, 2.975, Tightening::UB ), Tightening( 16, -0.4175, Tightening::LB ), - Tightening( 17, -11.452, Tightening::LB ), - Tightening( 17, 10.18, Tightening::UB ), - Tightening( 18, 0.87, Tightening::LB ), - Tightening( 18, 16.0688, Tightening::UB ), + Tightening( 17, -11.452, Tightening::LB ), Tightening( 17, 10.18, Tightening::UB ), + Tightening( 18, 0.87, Tightening::LB ), Tightening( 18, 16.0688, Tightening::UB ), - Tightening( 19, -1.1452, Tightening::LB ), - Tightening( 19, 10.18, Tightening::UB ), - Tightening( 20, 0.87, Tightening::LB ), - Tightening( 20, 16.0688, Tightening::UB ), + Tightening( 19, -1.1452, Tightening::LB ), Tightening( 19, 10.18, Tightening::UB ), + Tightening( 20, 0.87, Tightening::LB ), Tightening( 20, 16.0688, Tightening::UB ), - Tightening( 21, -17.0684, Tightening::LB ), - Tightening( 21, 3.6767, Tightening::UB ), + Tightening( 21, -17.0684, Tightening::LB ), Tightening( 21, 3.6767, Tightening::UB ), } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); @@ -5124,28 +5149,24 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds4( { - Tightening( 11, -5.6, Tightening::LB ), - Tightening( 11, 16.4636, Tightening::UB ), - Tightening( 12, -6.0286, Tightening::LB ), - Tightening( 13, -5.9, Tightening::LB ), + Tightening( 7, -0.2, Tightening::LB ), Tightening( 8, -0.5, Tightening::LB ), + Tightening( 9, -0.6, Tightening::LB ), Tightening( 10, -1.5, Tightening::LB ), + + Tightening( 11, -5.6, Tightening::LB ), Tightening( 11, 16.4636, Tightening::UB ), + Tightening( 12, -6.0286, Tightening::LB ), Tightening( 
13, -5.9, Tightening::LB ), Tightening( 13, 8.0468, Tightening::UB ), - Tightening( 15, -0.6029, Tightening::LB ), - Tightening( 16, -0.59, Tightening::LB ), + Tightening( 14, -0.56, Tightening::LB ), Tightening( 14, 16.4636, Tightening::UB ), + Tightening( 15, -0.6029, Tightening::LB ), Tightening( 16, -0.59, Tightening::LB ), Tightening( 16, 8.0468, Tightening::UB ), - Tightening( 17, -24.8864, Tightening::LB ), - Tightening( 17, 14.3076, Tightening::UB ), - Tightening( 18, 0.75, Tightening::LB ), - Tightening( 18, 28.0272, Tightening::UB ), + Tightening( 17, -24.8864, Tightening::LB ), Tightening( 17, 14.3076, Tightening::UB ), + Tightening( 18, 0.75, Tightening::LB ), Tightening( 18, 28.0272, Tightening::UB ), - Tightening( 19, -2.4886, Tightening::LB ), - Tightening( 19, 14.3076, Tightening::UB ), - Tightening( 20, 0.75, Tightening::LB ), - Tightening( 20, 28.0272, Tightening::UB ), + Tightening( 19, -2.4886, Tightening::LB ), Tightening( 19, 14.3076, Tightening::UB ), + Tightening( 20, 0.75, Tightening::LB ), Tightening( 20, 28.0272, Tightening::UB ), - Tightening( 21, -29.9648, Tightening::LB ), - Tightening( 21, 6.9619, Tightening::UB ), + Tightening( 21, -29.9648, Tightening::LB ), Tightening( 21, 6.9619, Tightening::UB ), } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); @@ -5488,6 +5509,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds2( { + Tightening( 10, 8.7411, Tightening::UB ), Tightening( 11, -2.4149, Tightening::LB ), } ); @@ -5556,10 +5578,13 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + Tightening( 8, 7, Tightening::UB ), Tightening( 9, 5.8235, Tightening::UB ), Tightening( 10, -14, Tightening::LB ), + Tightening( 10, 40.7647, Tightening::UB ), Tightening( 11, -24.8805, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), @@ -5626,7 +5651,11 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite List expectedBounds2( { Tightening( 12, -1.75, Tightening::LB ), - Tightening( 15, 0, Tightening::LB ), + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + + Tightening( 15, -0.0001, Tightening::LB ), + Tightening( 15, 78.8787, Tightening::UB ), } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); @@ -5705,10 +5734,16 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + Tightening( 11, -5, Tightening::LB ), Tightening( 12, -4.6429, Tightening::LB ), + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 15, 211.0082, Tightening::UB ), } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h index e0a0a90015..635949d23c 100644 --- a/src/nlr/tests/Test_NetworkLevelReasoner.h +++ b/src/nlr/tests/Test_NetworkLevelReasoner.h @@ -14,7 +14,6 @@ **/ #include "../../engine/tests/MockTableau.h" // TODO: fix this -#include "DeepPolySoftmaxElement.h" #include "FloatUtils.h" #include "Layer.h" #include "NetworkLevelReasoner.h" @@ -1959,9 +1958,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addLayer( 4, 
NLR::Layer::RELU, 2 ); nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.getLayer( 2 )->setAlpha( 0.1 ); - nlr.getLayer( 4 )->setAlpha( 0.1 ); - // Mark layer dependencies for ( unsigned i = 1; i <= 5; ++i ) nlr.addLayerDependency( i - 1, i ); @@ -9647,22 +9643,22 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector inputUb = { 0, 2, 4 }; Vector input = { -0.5, 1, 2.5 }; - double value = NLR::DeepPolySoftmaxElement::ERLowerBound( input, inputLb, inputUb, 0 ); + double value = NLR::Layer::ERLowerBound( input, inputLb, inputUb, 0 ); TS_ASSERT( FloatUtils::areEqual( value, 0.0114799, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dERLowerBound( input, inputLb, inputUb, 0, 0 ); + value = NLR::Layer::dERLowerBound( input, inputLb, inputUb, 0, 0 ); TS_ASSERT( FloatUtils::areEqual( value, 0.00563867, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dERLowerBound( input, inputLb, inputUb, 0, 1 ); + value = NLR::Layer::dERLowerBound( input, inputLb, inputUb, 0, 1 ); TS_ASSERT( FloatUtils::areEqual( value, -0.000838421, 0.00001 ) ); Vector outputLb = { 0.2, 0, 0 }; Vector outputUb = { 0.4, 0.1, 0.1 }; - value = NLR::DeepPolySoftmaxElement::ERUpperBound( input, outputLb, outputUb, 0 ); + value = NLR::Layer::ERUpperBound( input, outputLb, outputUb, 0 ); TS_ASSERT( FloatUtils::areEqual( value, -1.44538, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dERUpperBound( input, outputLb, outputUb, 0, 0 ); + value = NLR::Layer::dERUpperBound( input, outputLb, outputUb, 0, 0 ); TS_ASSERT( FloatUtils::areEqual( value, 1.96538, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dERUpperBound( input, outputLb, outputUb, 0, 1 ); + value = NLR::Layer::dERUpperBound( input, outputLb, outputUb, 0, 1 ); TS_ASSERT( FloatUtils::areEqual( value, -0.358535, 0.00001 ) ); } @@ -9671,20 +9667,20 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector inputLb = { -1, 0, 1 }; Vector inputUb = { 0, 2, 3 }; Vector input = { -0.5, 1, 2 }; - double value = NLR::DeepPolySoftmaxElement::LSELowerBound( input, inputLb, inputUb, 0 ); + double value = NLR::Layer::LSELowerBound( input, inputLb, inputUb, 0 ); TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) ); - value = NLR::DeepPolySoftmaxElement::dLSELowerBound( input, inputLb, inputUb, 0, 0 ); + value = NLR::Layer::dLSELowerBound( input, inputLb, inputUb, 0, 0 ); TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) ); - value = NLR::DeepPolySoftmaxElement::dLSELowerBound( input, inputLb, inputUb, 0, 1 ); + value = NLR::Layer::dLSELowerBound( input, inputLb, inputUb, 0, 1 ); TS_ASSERT( FloatUtils::areEqual( value, -0.00703444, 0.001 ) ); Vector outputLb = { 0.2, 0, 0 }; Vector outputUb = { 0.4, 0.1, 0.1 }; - value = NLR::DeepPolySoftmaxElement::LSEUpperBound( input, outputLb, outputUb, 0 ); + value = NLR::Layer::LSEUpperBound( input, outputLb, outputUb, 0 ); TS_ASSERT( FloatUtils::areEqual( value, -0.164165, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dLSEUpperbound( input, outputLb, outputUb, 0, 0 ); + value = NLR::Layer::dLSEUpperbound( input, outputLb, outputUb, 0, 0 ); TS_ASSERT( FloatUtils::areEqual( value, 0.272204, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dLSEUpperbound( input, outputLb, outputUb, 0, 1 ); + value = NLR::Layer::dLSEUpperbound( input, outputLb, outputUb, 0, 1 ); TS_ASSERT( FloatUtils::areEqual( value, -0.073207, 0.00001 ) ); } From 7c08e1eba93597b55ce78b4d1d9fb2659fd7323b Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 24 Jul 2025 20:36:16 +0300 Subject: [PATCH 67/81] Current 
progress in PMNR: 0. (previously) Added four temporary new options for
MILP_SOLVER_BOUND_TIGHTENING_TYPE. 1. Extended initializeSymbolicBoundsMaps
using the DeepPoly module. 2. Implemented generatePolygonalTightenings,
getParameterisdPolygonalTighteningLowerBound and helper functions. 3.
Corrections in Layer and OptionParser. 4. Created the new test file
Test_PMNR.h.
---
 src/configuration/OptionParser.cpp       |     2 +-
 src/nlr/CMakeLists.txt                   |     1 +
 src/nlr/DeepPolyAbsoluteValueElement.cpp |    22 +
 src/nlr/DeepPolyAbsoluteValueElement.h   |     2 +
 src/nlr/DeepPolyAnalysis.cpp             |    36 +-
 src/nlr/DeepPolyAnalysis.h               |    17 +-
 src/nlr/DeepPolyBilinearElement.cpp      |    23 +
 src/nlr/DeepPolyBilinearElement.h        |     2 +
 src/nlr/DeepPolyElement.cpp              |    29 +-
 src/nlr/DeepPolyElement.h                |    46 +-
 src/nlr/DeepPolyLeakyReLUElement.cpp     |    23 +
 src/nlr/DeepPolyLeakyReLUElement.h       |     2 +
 src/nlr/DeepPolyMaxPoolElement.cpp       |    42 +
 src/nlr/DeepPolyMaxPoolElement.h         |     4 +
 src/nlr/DeepPolyReLUElement.cpp          |    22 +
 src/nlr/DeepPolyReLUElement.h            |     2 +
 src/nlr/DeepPolyRoundElement.cpp         |    22 +
 src/nlr/DeepPolyRoundElement.h           |     2 +
 src/nlr/DeepPolySigmoidElement.cpp       |    22 +
 src/nlr/DeepPolySigmoidElement.h         |     2 +
 src/nlr/DeepPolySignElement.cpp          |    22 +
 src/nlr/DeepPolySignElement.h            |     2 +
 src/nlr/DeepPolySoftmaxElement.cpp       |    18 +
 src/nlr/DeepPolySoftmaxElement.h         |     3 +
 src/nlr/DeepPolyWeightedSumElement.cpp   |    40 +-
 src/nlr/DeepPolyWeightedSumElement.h     |     7 +
 src/nlr/Layer.cpp                        |    17 +-
 src/nlr/Layer.h                          |    11 +-
 src/nlr/NetworkLevelReasoner.cpp         |   842 +-
 src/nlr/NetworkLevelReasoner.h           |    40 +-
 src/nlr/tests/Test_DeepPolyAnalysis.h    |    60 +-
 src/nlr/tests/Test_PMNR.h                | 11387 +++++++++++++++++++++
 32 files changed, 12445 insertions(+), 327 deletions(-)
 create mode 100644 src/nlr/tests/Test_PMNR.h

diff --git a/src/configuration/OptionParser.cpp b/src/configuration/OptionParser.cpp
index cd1c182fce..fd5a0916b5 100644
--- a/src/configuration/OptionParser.cpp
+++ b/src/configuration/OptionParser.cpp
@@ -269,7 +269,7 @@ void OptionParser::initialize()
         "The MILP solver bound tightening type: "
         "lp/backward-once/backward-converge/backward-preimage-approx/backward-invprop/"
         "backward-pmnr-random/"
-        "backward-pmnr-gradient/backward-pmnr-bbps-heuristic/lp-inc/milp/milp-inc/"
+        "backward-pmnr-gradient/backward-pmnr-bbps/lp-inc/milp/milp-inc/"
        "iter-prop/none."
) #endif ; diff --git a/src/nlr/CMakeLists.txt b/src/nlr/CMakeLists.txt index e377a638ba..c70502cd15 100644 --- a/src/nlr/CMakeLists.txt +++ b/src/nlr/CMakeLists.txt @@ -21,6 +21,7 @@ network_level_reasoner_add_unit_test(ParallelSolver) if (${ENABLE_GUROBI}) network_level_reasoner_add_unit_test(LPRelaxation) + network_level_reasoner_add_unit_test(PMNR) endif() if (${BUILD_PYTHON}) diff --git a/src/nlr/DeepPolyAbsoluteValueElement.cpp b/src/nlr/DeepPolyAbsoluteValueElement.cpp index 75d7d13461..73cfd5209c 100644 --- a/src/nlr/DeepPolyAbsoluteValueElement.cpp +++ b/src/nlr/DeepPolyAbsoluteValueElement.cpp @@ -94,9 +94,31 @@ void DeepPolyAbsoluteValueElement::execute( _symbolicUpperBias[i] ) ); log( Stringf( "Neuron%u LB: %f, UB: %f", i, _lb[i], _ub[i] ) ); } + + if ( _storeSymbolicBoundsInTermsOfPredecessor ) + { + storePredecessorSymbolicBounds(); + } + log( "Executing - done" ); } +void DeepPolyAbsoluteValueElement::storePredecessorSymbolicBounds() +{ + double *currentSymbolicLb = ( *_symbolicLbInTermsOfPredecessor )[_layerIndex]; + double *currentSymbolicUb = ( *_symbolicUbInTermsOfPredecessor )[_layerIndex]; + double *currentSymbolicLowerBias = ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex]; + double *currentSymbolicUpperBias = ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex]; + + for ( unsigned i = 0; i < _size; ++i ) + { + currentSymbolicLb[i * _size] = _symbolicLb[i]; + currentSymbolicUb[i * _size] = _symbolicUb[i]; + currentSymbolicLowerBias[i] = _symbolicLowerBias[i]; + currentSymbolicUpperBias[i] = _symbolicUpperBias[i]; + } +} + void DeepPolyAbsoluteValueElement::symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, diff --git a/src/nlr/DeepPolyAbsoluteValueElement.h b/src/nlr/DeepPolyAbsoluteValueElement.h index 732320220e..2d97086ae9 100644 --- a/src/nlr/DeepPolyAbsoluteValueElement.h +++ b/src/nlr/DeepPolyAbsoluteValueElement.h @@ -33,6 +33,8 @@ class DeepPolyAbsoluteValueElement : public DeepPolyElement void execute( const Map &deepPolyElementsBefore ); + void storePredecessorSymbolicBounds(); + void symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, double *symbolicLowerBias, diff --git a/src/nlr/DeepPolyAnalysis.cpp b/src/nlr/DeepPolyAnalysis.cpp index d97c23edba..8e80ed1774 100644 --- a/src/nlr/DeepPolyAnalysis.cpp +++ b/src/nlr/DeepPolyAnalysis.cpp @@ -40,13 +40,19 @@ namespace NLR { DeepPolyAnalysis::DeepPolyAnalysis( LayerOwner *layerOwner, - bool storeSymbolicBounds, + bool storeOutputLayerSymbolicBounds, + bool storeSymbolicBoundsInTermsOfPredecessor, Map *outputLayerSymbolicLb, Map *outputLayerSymbolicUb, Map *outputLayerSymbolicLowerBias, - Map *outputLayerSymbolicUpperBias ) + Map *outputLayerSymbolicUpperBias, + Map *symbolicLbInTermsOfPredecessor, + Map *symbolicUbInTermsOfPredecessor, + Map *symbolicLowerBiasInTermsOfPredecessor, + Map *symbolicUpperBiasInTermsOfPredecessor ) : _layerOwner( layerOwner ) - , _storeSymbolicBounds( storeSymbolicBounds ) + , _storeOutputLayerSymbolicBounds( storeOutputLayerSymbolicBounds ) + , _storeSymbolicBoundsInTermsOfPredecessor( storeSymbolicBoundsInTermsOfPredecessor ) , _work1SymbolicLb( NULL ) , _work1SymbolicUb( NULL ) , _work2SymbolicLb( NULL ) @@ -57,6 +63,10 @@ DeepPolyAnalysis::DeepPolyAnalysis( LayerOwner *layerOwner, , _outputLayerSymbolicUb( outputLayerSymbolicUb ) , _outputLayerSymbolicLowerBias( outputLayerSymbolicLowerBias ) , _outputLayerSymbolicUpperBias( outputLayerSymbolicUpperBias ) + , _symbolicLbInTermsOfPredecessor( 
symbolicLbInTermsOfPredecessor ) + , _symbolicUbInTermsOfPredecessor( symbolicUbInTermsOfPredecessor ) + , _symbolicLowerBiasInTermsOfPredecessor( symbolicLowerBiasInTermsOfPredecessor ) + , _symbolicUpperBiasInTermsOfPredecessor( symbolicUpperBiasInTermsOfPredecessor ) { const Map &layers = _layerOwner->getLayerIndexToLayer(); // Get the maximal layer size @@ -158,7 +168,7 @@ void DeepPolyAnalysis::run() { if ( layer->neuronEliminated( j ) ) continue; - if ( _storeSymbolicBounds && index == _layerOwner->getNumberOfLayers() - 1 ) + if ( _storeOutputLayerSymbolicBounds && index == _layerOwner->getNumberOfLayers() - 1 ) continue; double lb = deepPolyElement->getLowerBound( j ); if ( layer->getLb( j ) < lb ) @@ -250,14 +260,20 @@ DeepPolyElement *DeepPolyAnalysis::createDeepPolyElement( Layer *layer ) throw NLRError( NLRError::LAYER_TYPE_NOT_SUPPORTED, Stringf( "Layer %u not yet supported", layer->getLayerType() ).ascii() ); - if ( _storeSymbolicBounds && layer->getLayerIndex() == _layerOwner->getNumberOfLayers() - 1 ) + deepPolyElement->setStoreSymbolicBoundsInTermsOfPredecessor( + _storeSymbolicBoundsInTermsOfPredecessor ); + if ( layer->getLayerIndex() == _layerOwner->getNumberOfLayers() - 1 ) { - deepPolyElement->setStoreSymbolicBounds( true ); + deepPolyElement->setStoreOutputLayerSymbolicBounds( _storeOutputLayerSymbolicBounds ); } - deepPolyElement->setOutputLayerSymbolicBoundsMemory( _outputLayerSymbolicLb, - _outputLayerSymbolicUb, - _outputLayerSymbolicLowerBias, - _outputLayerSymbolicUpperBias ); + deepPolyElement->setSymbolicBoundsMemory( _outputLayerSymbolicLb, + _outputLayerSymbolicUb, + _outputLayerSymbolicLowerBias, + _outputLayerSymbolicUpperBias, + _symbolicLbInTermsOfPredecessor, + _symbolicUbInTermsOfPredecessor, + _symbolicLowerBiasInTermsOfPredecessor, + _symbolicUpperBiasInTermsOfPredecessor ); return deepPolyElement; } diff --git a/src/nlr/DeepPolyAnalysis.h b/src/nlr/DeepPolyAnalysis.h index e2066874b6..76eb25e69f 100644 --- a/src/nlr/DeepPolyAnalysis.h +++ b/src/nlr/DeepPolyAnalysis.h @@ -29,18 +29,24 @@ class DeepPolyAnalysis { public: DeepPolyAnalysis( LayerOwner *layerOwner, - bool storeSymbolicBounds = false, + bool storeOutputLayerSymbolicBounds = false, + bool storeSymbolicBoundsInTermsOfPredecessor = false, Map *outputLayerSymbolicLb = NULL, Map *outputLayerSymbolicUb = NULL, Map *outputLayerSymbolicLowerBias = NULL, - Map *outputLayerSymbolicUpperBias = NULL ); + Map *outputLayerSymbolicUpperBias = NULL, + Map *symbolicLbInTermsOfPredecessor = NULL, + Map *symbolicUbInTermsOfPredecessor = NULL, + Map *symbolicLowerBiasInTermsOfPredecessor = NULL, + Map *symbolicUpperBiasInTermsOfPredecessor = NULL ); ~DeepPolyAnalysis(); void run(); private: LayerOwner *_layerOwner; - bool _storeSymbolicBounds; + bool _storeOutputLayerSymbolicBounds; + bool _storeSymbolicBoundsInTermsOfPredecessor; /* Maps layer index to the abstract element @@ -62,6 +68,11 @@ class DeepPolyAnalysis Map *_outputLayerSymbolicLowerBias; Map *_outputLayerSymbolicUpperBias; + Map *_symbolicLbInTermsOfPredecessor; + Map *_symbolicUbInTermsOfPredecessor; + Map *_symbolicLowerBiasInTermsOfPredecessor; + Map *_symbolicUpperBiasInTermsOfPredecessor; + unsigned _maxLayerSize; void allocateMemory(); diff --git a/src/nlr/DeepPolyBilinearElement.cpp b/src/nlr/DeepPolyBilinearElement.cpp index e8d2aef311..8583bdd99b 100644 --- a/src/nlr/DeepPolyBilinearElement.cpp +++ b/src/nlr/DeepPolyBilinearElement.cpp @@ -104,6 +104,11 @@ void DeepPolyBilinearElement::execute( _symbolicUpperBias[i] = -sourceLbs[0] * 
sourceUbs[1]; } + if ( _storeSymbolicBoundsInTermsOfPredecessor ) + { + storePredecessorSymbolicBounds(); + } + DEBUG( { for ( unsigned i = 0; i < _size; ++i ) { @@ -115,6 +120,24 @@ void DeepPolyBilinearElement::execute( log( "Executing - done" ); } +void DeepPolyBilinearElement::storePredecessorSymbolicBounds() +{ + double *currentSymbolicLb = ( *_symbolicLbInTermsOfPredecessor )[_layerIndex]; + double *currentSymbolicUb = ( *_symbolicUbInTermsOfPredecessor )[_layerIndex]; + double *currentSymbolicLowerBias = ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex]; + double *currentSymbolicUpperBias = ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex]; + + for ( unsigned i = 0; i < _size; ++i ) + { + currentSymbolicLb[i * _size] = _symbolicLbA[i]; + currentSymbolicUb[i * _size] = _symbolicUbA[i]; + currentSymbolicLb[i * _size + 1] = _symbolicLbB[i]; + currentSymbolicUb[i * _size + 1] = _symbolicUbB[i]; + currentSymbolicLowerBias[i] = _symbolicLowerBias[i]; + currentSymbolicUpperBias[i] = _symbolicUpperBias[i]; + } +} + void DeepPolyBilinearElement::symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, diff --git a/src/nlr/DeepPolyBilinearElement.h b/src/nlr/DeepPolyBilinearElement.h index 8b0ce8dcce..6e8d82e8c6 100644 --- a/src/nlr/DeepPolyBilinearElement.h +++ b/src/nlr/DeepPolyBilinearElement.h @@ -33,6 +33,8 @@ class DeepPolyBilinearElement : public DeepPolyElement void execute( const Map &deepPolyElementsBefore ); + void storePredecessorSymbolicBounds(); + void symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, double *symbolicLowerBias, diff --git a/src/nlr/DeepPolyElement.cpp b/src/nlr/DeepPolyElement.cpp index 86afb43afd..8f21cd30e4 100644 --- a/src/nlr/DeepPolyElement.cpp +++ b/src/nlr/DeepPolyElement.cpp @@ -17,11 +17,12 @@ namespace NLR { -DeepPolyElement::DeepPolyElement( bool storeSymbolicBounds ) +DeepPolyElement::DeepPolyElement() : _layer( NULL ) , _size( 0 ) , _layerIndex( 0 ) - , _storeSymbolicBounds( storeSymbolicBounds ) + , _storeOutputLayerSymbolicBounds( false ) + , _storeSymbolicBoundsInTermsOfPredecessor( false ) , _symbolicLb( NULL ) , _symbolicUb( NULL ) , _symbolicLowerBias( NULL ) @@ -93,9 +94,15 @@ double DeepPolyElement::getUpperBound( unsigned index ) const return _ub[index]; } -void DeepPolyElement::setStoreSymbolicBounds( bool storeSymbolicBounds ) +void DeepPolyElement::setStoreOutputLayerSymbolicBounds( bool storeOutputLayerSymbolicBounds ) { - _storeSymbolicBounds = storeSymbolicBounds; + _storeOutputLayerSymbolicBounds = storeOutputLayerSymbolicBounds; +} + +void DeepPolyElement::setStoreSymbolicBoundsInTermsOfPredecessor( + bool storeSymbolicBoundsInTermsOfPredecessor ) +{ + _storeSymbolicBoundsInTermsOfPredecessor = storeSymbolicBoundsInTermsOfPredecessor; } double DeepPolyElement::getLowerBoundFromLayer( unsigned index ) const @@ -162,19 +169,27 @@ void DeepPolyElement::setWorkingMemory( double *work1SymbolicLb, _workSymbolicUpperBias = workSymbolicUpperBias; } -void DeepPolyElement::setOutputLayerSymbolicBoundsMemory( +void DeepPolyElement::setSymbolicBoundsMemory( Map *outputLayerSymbolicLb, Map *outputLayerSymbolicUb, Map *outputLayerSymbolicLowerBias, - Map *outputLayerSymbolicUpperBias ) + Map *outputLayerSymbolicUpperBias, + Map *symbolicLbInTermsOfPredecessor, + Map *symbolicUbInTermsOfPredecessor, + Map *symbolicLowerBiasInTermsOfPredecessor, + Map *symbolicUpperBiasInTermsOfPredecessor ) { _outputLayerSymbolicLb = outputLayerSymbolicLb; _outputLayerSymbolicUb = 
outputLayerSymbolicUb; _outputLayerSymbolicLowerBias = outputLayerSymbolicLowerBias; _outputLayerSymbolicUpperBias = outputLayerSymbolicUpperBias; + _symbolicLbInTermsOfPredecessor = symbolicLbInTermsOfPredecessor; + _symbolicUbInTermsOfPredecessor = symbolicUbInTermsOfPredecessor; + _symbolicLowerBiasInTermsOfPredecessor = symbolicLowerBiasInTermsOfPredecessor; + _symbolicUpperBiasInTermsOfPredecessor = symbolicUpperBiasInTermsOfPredecessor; } -void DeepPolyElement::storeWorkSymbolicBounds( +void DeepPolyElement::storeOutputSymbolicBounds( unsigned sourceLayerSize, double *work1SymbolicLb, double *work1SymbolicUb, diff --git a/src/nlr/DeepPolyElement.h b/src/nlr/DeepPolyElement.h index b4be631490..2fbd0b0a19 100644 --- a/src/nlr/DeepPolyElement.h +++ b/src/nlr/DeepPolyElement.h @@ -28,7 +28,7 @@ namespace NLR { class DeepPolyElement { public: - DeepPolyElement( bool storeSymbolicBounds = false ); + DeepPolyElement(); virtual ~DeepPolyElement(){}; // execute the abstract layer based on the abstract layers topologically @@ -68,7 +68,9 @@ class DeepPolyElement double *getSymbolicUpperBias() const; double getLowerBound( unsigned index ) const; double getUpperBound( unsigned index ) const; - void setStoreSymbolicBounds( bool storeSymbolicBounds ); + + void setStoreOutputLayerSymbolicBounds( bool storeOutputLayerSymbolicBounds ); + void setStoreSymbolicBoundsInTermsOfPredecessor( bool storeSymbolicBoundsInTermsOfPredecessor ); void setWorkingMemory( double *work1SymbolicLb, double *work1SymbolicUb, @@ -77,21 +79,25 @@ class DeepPolyElement double *workSymbolicLowerBias, double *workSymbolicUpperBias ); + void setSymbolicBoundsMemory( Map *outputLayerSymbolicLb, + Map *outputLayerSymbolicUb, + Map *outputLayerSymbolicLowerBias, + Map *outputLayerSymbolicUpperBias, + Map *symbolicLbInTermsOfPredecessor, + Map *symbolicUbInTermsOfPredecessor, + Map *symbolicLowerBiasInTermsOfPredecessor, + Map *symbolicUpperBiasInTermsOfPredecessor ); + void - setOutputLayerSymbolicBoundsMemory( Map *outputLayerSymbolicLb, - Map *outputLayerSymbolicUb, - Map *outputLayerSymbolicLowerBias, - Map *outputLayerSymbolicUpperBias ); - - void storeWorkSymbolicBounds( unsigned sourceLayerSize, - double *work1SymbolicLb, - double *work1SymbolicUb, - double *workSymbolicLowerBias, - double *workSymbolicUpperBias, - Map &residualLb, - Map &residualUb, - Set &residualLayerIndices, - const Map &deepPolyElementsBefore ); + storeOutputSymbolicBounds( unsigned sourceLayerSize, + double *work1SymbolicLb, + double *work1SymbolicUb, + double *workSymbolicLowerBias, + double *workSymbolicUpperBias, + Map &residualLb, + Map &residualUb, + Set &residualLayerIndices, + const Map &deepPolyElementsBefore ); double getLowerBoundFromLayer( unsigned index ) const; double getUpperBoundFromLayer( unsigned index ) const; @@ -100,7 +106,8 @@ class DeepPolyElement Layer *_layer; unsigned _size; unsigned _layerIndex; - bool _storeSymbolicBounds; + bool _storeOutputLayerSymbolicBounds; + bool _storeSymbolicBoundsInTermsOfPredecessor; /* Abstract element described in @@ -126,6 +133,11 @@ class DeepPolyElement Map *_outputLayerSymbolicLowerBias; Map *_outputLayerSymbolicUpperBias; + Map *_symbolicLbInTermsOfPredecessor; + Map *_symbolicUbInTermsOfPredecessor; + Map *_symbolicLowerBiasInTermsOfPredecessor; + Map *_symbolicUpperBiasInTermsOfPredecessor; + void allocateMemory(); void freeMemoryIfNeeded(); diff --git a/src/nlr/DeepPolyLeakyReLUElement.cpp b/src/nlr/DeepPolyLeakyReLUElement.cpp index f944d3ac73..ea4ef5a555 100644 --- 
a/src/nlr/DeepPolyLeakyReLUElement.cpp +++ b/src/nlr/DeepPolyLeakyReLUElement.cpp @@ -142,9 +142,32 @@ void DeepPolyLeakyReLUElement::execute( _symbolicUpperBias[i] ) ); log( Stringf( "Neuron%u LB: %f, UB: %f", i, _lb[i], _ub[i] ) ); } + + + if ( _storeSymbolicBoundsInTermsOfPredecessor ) + { + storePredecessorSymbolicBounds(); + } + log( "Executing - done" ); } +void DeepPolyLeakyReLUElement::storePredecessorSymbolicBounds() +{ + double *currentSymbolicLb = ( *_symbolicLbInTermsOfPredecessor )[_layerIndex]; + double *currentSymbolicUb = ( *_symbolicUbInTermsOfPredecessor )[_layerIndex]; + double *currentSymbolicLowerBias = ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex]; + double *currentSymbolicUpperBias = ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex]; + + for ( unsigned i = 0; i < _size; ++i ) + { + currentSymbolicLb[i * _size] = _symbolicLb[i]; + currentSymbolicUb[i * _size] = _symbolicUb[i]; + currentSymbolicLowerBias[i] = _symbolicLowerBias[i]; + currentSymbolicUpperBias[i] = _symbolicUpperBias[i]; + } +} + void DeepPolyLeakyReLUElement::symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, diff --git a/src/nlr/DeepPolyLeakyReLUElement.h b/src/nlr/DeepPolyLeakyReLUElement.h index 01ae5876b3..4349836eac 100644 --- a/src/nlr/DeepPolyLeakyReLUElement.h +++ b/src/nlr/DeepPolyLeakyReLUElement.h @@ -33,6 +33,8 @@ class DeepPolyLeakyReLUElement : public DeepPolyElement void execute( const Map &deepPolyElementsBefore ); + void storePredecessorSymbolicBounds(); + void symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, double *symbolicLowerBias, diff --git a/src/nlr/DeepPolyMaxPoolElement.cpp b/src/nlr/DeepPolyMaxPoolElement.cpp index 43f7084978..89c7921a46 100644 --- a/src/nlr/DeepPolyMaxPoolElement.cpp +++ b/src/nlr/DeepPolyMaxPoolElement.cpp @@ -40,6 +40,9 @@ void DeepPolyMaxPoolElement::execute( // Update the symbolic and concrete upper- and lower- bounds // of each neuron + Vector phasesFixed( _size ); + Vector maxLowerBoundIndices( _size ); + Vector maxUpperBounds( _size ); for ( unsigned i = 0; i < _size; ++i ) { log( Stringf( "Handling Neuron %u_%u...", _layerIndex, i ) ); @@ -83,6 +86,10 @@ void DeepPolyMaxPoolElement::execute( } } + phasesFixed[i] = phaseFixed ? 
1 : 0; + maxLowerBoundIndices[i] = indexOfMaxLowerBound._neuron; + maxUpperBounds[i] = maxUpperBound; + if ( phaseFixed ) { log( Stringf( "Neuron %u_%u fixed to Neuron %u_%u", @@ -110,9 +117,44 @@ void DeepPolyMaxPoolElement::execute( log( Stringf( "Neuron%u LB: %f, UB: %f", i, _lb[i], _ub[i] ) ); log( Stringf( "Handling Neuron %u_%u - done", _layerIndex, i ) ); } + + if ( _storeSymbolicBoundsInTermsOfPredecessor ) + { + storePredecessorSymbolicBounds( phasesFixed, maxLowerBoundIndices, maxUpperBounds ); + } + log( "Executing - done" ); } +void DeepPolyMaxPoolElement::storePredecessorSymbolicBounds( + const Vector &phaseFixed, + const Vector &indexOfMaxLowerBound, + const Vector &maxUpperBound ) +{ + double *currentSymbolicLb = ( *_symbolicLbInTermsOfPredecessor )[_layerIndex]; + double *currentSymbolicUb = ( *_symbolicUbInTermsOfPredecessor )[_layerIndex]; + double *currentSymbolicLowerBias = ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex]; + double *currentSymbolicUpperBias = ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex]; + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( phaseFixed[i] > 0 ) + { + currentSymbolicLb[i * _size + indexOfMaxLowerBound[i]] = 1; + currentSymbolicUb[i * _size + indexOfMaxLowerBound[i]] = 1; + currentSymbolicLowerBias[i] = 0; + currentSymbolicUpperBias[i] = 0; + } + else + { + currentSymbolicLb[i * _size + indexOfMaxLowerBound[i]] = 1; + currentSymbolicUb[i * _size + indexOfMaxLowerBound[i]] = 0; + currentSymbolicLowerBias[i] = 0; + currentSymbolicUpperBias[i] = maxUpperBound[i]; + } + } +} + void DeepPolyMaxPoolElement::symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, diff --git a/src/nlr/DeepPolyMaxPoolElement.h b/src/nlr/DeepPolyMaxPoolElement.h index 9b16cd2257..2573ac8c4b 100644 --- a/src/nlr/DeepPolyMaxPoolElement.h +++ b/src/nlr/DeepPolyMaxPoolElement.h @@ -34,6 +34,10 @@ class DeepPolyMaxPoolElement : public DeepPolyElement void execute( const Map &deepPolyElementsBefore ); + void storePredecessorSymbolicBounds( const Vector &phaseFixed, + const Vector &indexOfMaxLowerBound, + const Vector &maxUpperBound ); + void symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, double *symbolicLowerBias, diff --git a/src/nlr/DeepPolyReLUElement.cpp b/src/nlr/DeepPolyReLUElement.cpp index 678c1edc5c..aa2e6ef80d 100644 --- a/src/nlr/DeepPolyReLUElement.cpp +++ b/src/nlr/DeepPolyReLUElement.cpp @@ -114,9 +114,31 @@ void DeepPolyReLUElement::execute( const Map &deepP _symbolicUpperBias[i] ) ); log( Stringf( "Neuron%u LB: %f, UB: %f", i, _lb[i], _ub[i] ) ); } + + if ( _storeSymbolicBoundsInTermsOfPredecessor ) + { + storePredecessorSymbolicBounds(); + } + log( "Executing - done" ); } +void DeepPolyReLUElement::storePredecessorSymbolicBounds() +{ + double *currentSymbolicLb = ( *_symbolicLbInTermsOfPredecessor )[_layerIndex]; + double *currentSymbolicUb = ( *_symbolicUbInTermsOfPredecessor )[_layerIndex]; + double *currentSymbolicLowerBias = ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex]; + double *currentSymbolicUpperBias = ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex]; + + for ( unsigned i = 0; i < _size; ++i ) + { + currentSymbolicLb[i * _size] = _symbolicLb[i]; + currentSymbolicUb[i * _size] = _symbolicUb[i]; + currentSymbolicLowerBias[i] = _symbolicLowerBias[i]; + currentSymbolicUpperBias[i] = _symbolicUpperBias[i]; + } +} + void DeepPolyReLUElement::symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, double 
*symbolicLowerBias, diff --git a/src/nlr/DeepPolyReLUElement.h b/src/nlr/DeepPolyReLUElement.h index 69b6b0ccdd..2a7bbc0be7 100644 --- a/src/nlr/DeepPolyReLUElement.h +++ b/src/nlr/DeepPolyReLUElement.h @@ -33,6 +33,8 @@ class DeepPolyReLUElement : public DeepPolyElement void execute( const Map &deepPolyElementsBefore ); + void storePredecessorSymbolicBounds(); + void symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, double *symbolicLowerBias, diff --git a/src/nlr/DeepPolyRoundElement.cpp b/src/nlr/DeepPolyRoundElement.cpp index fbb9565f61..a0ab150d09 100644 --- a/src/nlr/DeepPolyRoundElement.cpp +++ b/src/nlr/DeepPolyRoundElement.cpp @@ -82,9 +82,31 @@ void DeepPolyRoundElement::execute( const Map &deep _symbolicUpperBias[i] ) ); log( Stringf( "Neuron%u LB: %f, UB: %f", i, _lb[i], _ub[i] ) ); } + + if ( _storeSymbolicBoundsInTermsOfPredecessor ) + { + storePredecessorSymbolicBounds(); + } + log( "Executing - done" ); } +void DeepPolyRoundElement::storePredecessorSymbolicBounds() +{ + double *currentSymbolicLb = ( *_symbolicLbInTermsOfPredecessor )[_layerIndex]; + double *currentSymbolicUb = ( *_symbolicUbInTermsOfPredecessor )[_layerIndex]; + double *currentSymbolicLowerBias = ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex]; + double *currentSymbolicUpperBias = ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex]; + + for ( unsigned i = 0; i < _size; ++i ) + { + currentSymbolicLb[i * _size] = _symbolicLb[i]; + currentSymbolicUb[i * _size] = _symbolicUb[i]; + currentSymbolicLowerBias[i] = _symbolicLowerBias[i]; + currentSymbolicUpperBias[i] = _symbolicUpperBias[i]; + } +} + void DeepPolyRoundElement::symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, diff --git a/src/nlr/DeepPolyRoundElement.h b/src/nlr/DeepPolyRoundElement.h index b213c97e0e..d82695e59e 100644 --- a/src/nlr/DeepPolyRoundElement.h +++ b/src/nlr/DeepPolyRoundElement.h @@ -33,6 +33,8 @@ class DeepPolyRoundElement : public DeepPolyElement void execute( const Map &deepPolyElementsBefore ); + void storePredecessorSymbolicBounds(); + void symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, double *symbolicLowerBias, diff --git a/src/nlr/DeepPolySigmoidElement.cpp b/src/nlr/DeepPolySigmoidElement.cpp index 0aee3e1889..419536564c 100644 --- a/src/nlr/DeepPolySigmoidElement.cpp +++ b/src/nlr/DeepPolySigmoidElement.cpp @@ -99,9 +99,31 @@ void DeepPolySigmoidElement::execute( _symbolicUpperBias[i] ) ); log( Stringf( "Neuron%u LB: %f, UB: %f", i, _lb[i], _ub[i] ) ); } + + if ( _storeSymbolicBoundsInTermsOfPredecessor ) + { + storePredecessorSymbolicBounds(); + } + log( "Executing - done" ); } +void DeepPolySigmoidElement::storePredecessorSymbolicBounds() +{ + double *currentSymbolicLb = ( *_symbolicLbInTermsOfPredecessor )[_layerIndex]; + double *currentSymbolicUb = ( *_symbolicUbInTermsOfPredecessor )[_layerIndex]; + double *currentSymbolicLowerBias = ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex]; + double *currentSymbolicUpperBias = ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex]; + + for ( unsigned i = 0; i < _size; ++i ) + { + currentSymbolicLb[i * _size] = _symbolicLb[i]; + currentSymbolicUb[i * _size] = _symbolicUb[i]; + currentSymbolicLowerBias[i] = _symbolicLowerBias[i]; + currentSymbolicUpperBias[i] = _symbolicUpperBias[i]; + } +} + void DeepPolySigmoidElement::symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, diff --git 
a/src/nlr/DeepPolySigmoidElement.h b/src/nlr/DeepPolySigmoidElement.h index 3c9d090f95..b93255c033 100644 --- a/src/nlr/DeepPolySigmoidElement.h +++ b/src/nlr/DeepPolySigmoidElement.h @@ -33,6 +33,8 @@ class DeepPolySigmoidElement : public DeepPolyElement void execute( const Map &deepPolyElementsBefore ); + void storePredecessorSymbolicBounds(); + void symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, double *symbolicLowerBias, diff --git a/src/nlr/DeepPolySignElement.cpp b/src/nlr/DeepPolySignElement.cpp index 200b6e40f9..d0a070c4f7 100644 --- a/src/nlr/DeepPolySignElement.cpp +++ b/src/nlr/DeepPolySignElement.cpp @@ -96,9 +96,31 @@ void DeepPolySignElement::execute( const Map &deepP _symbolicUpperBias[i] ) ); log( Stringf( "Neuron%u LB: %f, UB: %f", i, _lb[i], _ub[i] ) ); } + + if ( _storeSymbolicBoundsInTermsOfPredecessor ) + { + storePredecessorSymbolicBounds(); + } + log( "Executing - done" ); } +void DeepPolySignElement::storePredecessorSymbolicBounds() +{ + double *currentSymbolicLb = ( *_symbolicLbInTermsOfPredecessor )[_layerIndex]; + double *currentSymbolicUb = ( *_symbolicUbInTermsOfPredecessor )[_layerIndex]; + double *currentSymbolicLowerBias = ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex]; + double *currentSymbolicUpperBias = ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex]; + + for ( unsigned i = 0; i < _size; ++i ) + { + currentSymbolicLb[i * _size] = _symbolicLb[i]; + currentSymbolicUb[i * _size] = _symbolicUb[i]; + currentSymbolicLowerBias[i] = _symbolicLowerBias[i]; + currentSymbolicUpperBias[i] = _symbolicUpperBias[i]; + } +} + void DeepPolySignElement::symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, double *symbolicLowerBias, diff --git a/src/nlr/DeepPolySignElement.h b/src/nlr/DeepPolySignElement.h index 53eb9edd4a..e699391806 100644 --- a/src/nlr/DeepPolySignElement.h +++ b/src/nlr/DeepPolySignElement.h @@ -33,6 +33,8 @@ class DeepPolySignElement : public DeepPolyElement void execute( const Map &deepPolyElementsBefore ); + void storePredecessorSymbolicBounds(); + void symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, double *symbolicLowerBias, diff --git a/src/nlr/DeepPolySoftmaxElement.cpp b/src/nlr/DeepPolySoftmaxElement.cpp index 574d3970b7..e61f990b66 100644 --- a/src/nlr/DeepPolySoftmaxElement.cpp +++ b/src/nlr/DeepPolySoftmaxElement.cpp @@ -188,9 +188,27 @@ void DeepPolySoftmaxElement::execute( } } } + + if ( _storeSymbolicBoundsInTermsOfPredecessor ) + { + storePredecessorSymbolicBounds(); + } + log( "Executing - done" ); } +void DeepPolySoftmaxElement::storePredecessorSymbolicBounds() +{ + double *currentSymbolicLb = ( *_symbolicLbInTermsOfPredecessor )[_layerIndex]; + double *currentSymbolicUb = ( *_symbolicUbInTermsOfPredecessor )[_layerIndex]; + double *currentSymbolicLowerBias = ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex]; + double *currentSymbolicUpperBias = ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex]; + memcpy( currentSymbolicLb, _symbolicLb, _size * _size * sizeof( double ) ); + memcpy( currentSymbolicUb, _symbolicUb, _size * _size * sizeof( double ) ); + memcpy( currentSymbolicLowerBias, _symbolicLowerBias, _size * sizeof( double ) ); + memcpy( currentSymbolicUpperBias, _symbolicUpperBias, _size * sizeof( double ) ); +} + void DeepPolySoftmaxElement::symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, diff --git a/src/nlr/DeepPolySoftmaxElement.h 
b/src/nlr/DeepPolySoftmaxElement.h index 82e74dae56..ee004a64a2 100644 --- a/src/nlr/DeepPolySoftmaxElement.h +++ b/src/nlr/DeepPolySoftmaxElement.h @@ -33,6 +33,9 @@ class DeepPolySoftmaxElement : public DeepPolyElement ~DeepPolySoftmaxElement(); void execute( const Map &deepPolyElements ); + + void storePredecessorSymbolicBounds(); + void symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, double *symbolicLowerBias, diff --git a/src/nlr/DeepPolyWeightedSumElement.cpp b/src/nlr/DeepPolyWeightedSumElement.cpp index 3f7276a433..14362a21d4 100644 --- a/src/nlr/DeepPolyWeightedSumElement.cpp +++ b/src/nlr/DeepPolyWeightedSumElement.cpp @@ -103,17 +103,17 @@ void DeepPolyWeightedSumElement::computeBoundWithBackSubstitution( currentElement, deepPolyElementsBefore ); - if ( _storeSymbolicBounds ) + if ( _storeOutputLayerSymbolicBounds ) { - precedingElement->storeWorkSymbolicBounds( sourceLayerSize, - _work1SymbolicLb, - _work1SymbolicUb, - _workSymbolicLowerBias, - _workSymbolicUpperBias, - _residualLb, - _residualUb, - _residualLayerIndices, - deepPolyElementsBefore ); + precedingElement->storeOutputSymbolicBounds( sourceLayerSize, + _work1SymbolicLb, + _work1SymbolicUb, + _workSymbolicLowerBias, + _workSymbolicUpperBias, + _residualLb, + _residualUb, + _residualLayerIndices, + deepPolyElementsBefore ); } log( Stringf( "Computing symbolic bounds with respect to layer %u - done", predecessorIndex ) ); @@ -244,17 +244,17 @@ void DeepPolyWeightedSumElement::computeBoundWithBackSubstitution( std::fill_n( _residualUb[newCurrentIndex], currentMatrixSize, 0 ); } - if ( _storeSymbolicBounds ) + if ( _storeOutputLayerSymbolicBounds ) { - precedingElement->storeWorkSymbolicBounds( sourceLayerSize, - _work1SymbolicLb, - _work1SymbolicUb, - _workSymbolicLowerBias, - _workSymbolicUpperBias, - _residualLb, - _residualUb, - _residualLayerIndices, - deepPolyElementsBefore ); + precedingElement->storeOutputSymbolicBounds( currentElement->getSize(), + _work1SymbolicLb, + _work1SymbolicUb, + _workSymbolicLowerBias, + _workSymbolicUpperBias, + _residualLb, + _residualUb, + _residualLayerIndices, + deepPolyElementsBefore ); } } ASSERT( _residualLayerIndices.empty() ); diff --git a/src/nlr/DeepPolyWeightedSumElement.h b/src/nlr/DeepPolyWeightedSumElement.h index 0f8ca74ef3..447017f640 100644 --- a/src/nlr/DeepPolyWeightedSumElement.h +++ b/src/nlr/DeepPolyWeightedSumElement.h @@ -77,6 +77,13 @@ class DeepPolyWeightedSumElement : public DeepPolyElement const double *symbolicUpperBias, DeepPolyElement *sourceElement ); + void + storeOutputSymbolicBounds( unsigned sourceLayerSize, + Map &residualLb, + Map &residualUb, + Set &residualLayerIndices, + const Map &deepPolyElementsBefore ); + void allocateMemoryForResidualsIfNeeded( unsigned residualLayerIndex, unsigned residualLayerSize ); void allocateMemory(); diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index a2d227f641..62af5e8e91 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -2275,7 +2275,7 @@ void Layer::computeSymbolicBoundsForSigmoid() double sourceLbSigmoid = SigmoidConstraint::sigmoid( sourceLb ); // Case when the Sigmoid constraint is fixed - if ( FloatUtils::areEqual( FloatUtils::round( sourceUb ), FloatUtils::round( sourceLb ) ) ) + if ( FloatUtils::areEqual( sourceLb, sourceUb ) ) { for ( unsigned j = 0; j < _inputLayerSize; ++j ) { @@ -4300,21 +4300,6 @@ void Layer::computeParameterisedSymbolicBoundsForBilinear( const Vector } } - -double Layer::calculateDifferenceFromSymbolic( Map &point, unsigned i ) 
const -{ - double lowerSum = _symbolicLowerBias[i]; - double upperSum = _symbolicUpperBias[i]; - - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - lowerSum += _symbolicLb[j * _size + i] * point[j]; - upperSum += _symbolicUb[j * _size + i] * point[j]; - } - - return std::max( _ub[i] - upperSum, lowerSum - _lb[i] ); -} - double Layer::LSELowerBound( const Vector &inputs, const Vector &inputLbs, const Vector &inputUbs, diff --git a/src/nlr/Layer.h b/src/nlr/Layer.h index 02961c568f..cfff68e8a6 100644 --- a/src/nlr/Layer.h +++ b/src/nlr/Layer.h @@ -140,9 +140,10 @@ class Layer void computeSymbolicBounds(); void computeParameterisedSymbolicBounds( const Vector &coeffs, bool receive = false ); - // Return difference between given point and upper and lower bounds determined by parameterised - // SBT relaxation. - double calculateDifferenceFromSymbolic( Map &point, unsigned i ) const; + const double *getSymbolicLb() const; + const double *getSymbolicUb() const; + const double *getSymbolicLowerBias() const; + const double *getSymbolicUpperBias() const; // The following methods compute concrete softmax output bounds // using different linear approximation, as well as the coefficients @@ -308,10 +309,6 @@ class Layer void computeIntervalArithmeticBoundsForSoftmax(); void computeIntervalArithmeticBoundsForBilinear(); - const double *getSymbolicLb() const; - const double *getSymbolicUb() const; - const double *getSymbolicLowerBias() const; - const double *getSymbolicUpperBias() const; double getSymbolicLbOfLb( unsigned neuron ) const; double getSymbolicUbOfLb( unsigned neuron ) const; double getSymbolicLbOfUb( unsigned neuron ) const; diff --git a/src/nlr/NetworkLevelReasoner.cpp b/src/nlr/NetworkLevelReasoner.cpp index 5976941a25..bf4a3642f1 100644 --- a/src/nlr/NetworkLevelReasoner.cpp +++ b/src/nlr/NetworkLevelReasoner.cpp @@ -232,7 +232,7 @@ void NetworkLevelReasoner::parameterisedSymbolicBoundPropagation( const Vector &currentLayerCoeffs = layerIndicesToParameters[i]; - _layerIndexToLayer[i]->computeParameterisedSymbolicBounds( currentLayerCoeffs ); + _layerIndexToLayer[i]->computeParameterisedSymbolicBounds( currentLayerCoeffs, true ); } } @@ -258,7 +258,7 @@ void NetworkLevelReasoner::lpRelaxationPropagation() optimizeBoundsWithPreimageApproximation( _layerIndexToLayer, lpFormulator ); else if ( Options::get()->getMILPSolverBoundTighteningType() == MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP ) - optimizeBoundsWithInvprop( _layerIndexToLayer, lpFormulator ); + optimizeBoundsWithPMNR( _layerIndexToLayer, lpFormulator ); else if ( Options::get()->getMILPSolverBoundTighteningType() == MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM || Options::get()->getMILPSolverBoundTighteningType() == @@ -284,28 +284,14 @@ void NetworkLevelReasoner::optimizeBoundsWithPreimageApproximation( Map &layers, - LPFormulator &lpFormulator ) -{ - const Vector &polygonal_tightenings = - OptimizeParameterisedPolygonalTightening(); - const Vector &optimal_coeffs = Vector( {} ); - Map> layerIndicesToParameters = - getParametersForLayers( layers, optimal_coeffs ); - lpFormulator.optimizeBoundsWithLpRelaxation( - layers, false, layerIndicesToParameters, polygonal_tightenings ); - lpFormulator.optimizeBoundsWithLpRelaxation( - layers, true, layerIndicesToParameters, polygonal_tightenings ); -} - void NetworkLevelReasoner::optimizeBoundsWithPMNR( Map &layers, LPFormulator &lpFormulator ) { const Vector &polygonal_tightenings = OptimizeParameterisedPolygonalTightening(); - const Vector &optimal_coeffs = 
OptimalParameterisedSymbolicBoundTightening(); + const Vector &coeffs = Vector( {} ); Map> layerIndicesToParameters = - getParametersForLayers( layers, optimal_coeffs ); + getParametersForLayers( layers, coeffs ); lpFormulator.optimizeBoundsWithLpRelaxation( layers, false, layerIndicesToParameters, polygonal_tightenings ); lpFormulator.optimizeBoundsWithLpRelaxation( @@ -368,45 +354,7 @@ void NetworkLevelReasoner::freeMemoryIfNeeded() delete layer.second; _layerIndexToLayer.clear(); - for ( auto &pair : _outputLayerSymbolicLb ) - { - if ( pair.second ) - { - delete[] pair.second; - pair.second = NULL; - } - } - _outputLayerSymbolicLb.clear(); - - for ( auto &pair : _outputLayerSymbolicUb ) - { - if ( pair.second ) - { - delete[] pair.second; - pair.second = NULL; - } - } - _outputLayerSymbolicUb.clear(); - - for ( auto &pair : _outputLayerSymbolicLowerBias ) - { - if ( pair.second ) - { - delete[] pair.second; - pair.second = NULL; - } - } - _outputLayerSymbolicLowerBias.clear(); - - for ( auto &pair : _outputLayerSymbolicUpperBias ) - { - if ( pair.second ) - { - delete[] pair.second; - pair.second = NULL; - } - } - _outputLayerSymbolicUpperBias.clear(); + freeMemoryForSymbolicBoundMapsIfNeeded(); } void NetworkLevelReasoner::storeIntoOther( NetworkLevelReasoner &other ) const @@ -793,13 +741,12 @@ double NetworkLevelReasoner::getPreviousBias( const ReluConstraint *reluConstrai return _previousBiases[reluConstraint]; } - const double *NetworkLevelReasoner::getOutputLayerSymbolicLb( unsigned layerIndex ) const { - // Initialize map if empty - if ( _previousBiases.empty() ) + // Initialize map if empty. + if ( _outputLayerSymbolicLb.empty() ) { - const_cast( this )->initializeOutputLayerSymbolicBounds(); + const_cast( this )->initializeSymbolicBoundsMaps(); } if ( !_outputLayerSymbolicLb.exists( layerIndex ) ) @@ -812,10 +759,10 @@ const double *NetworkLevelReasoner::getOutputLayerSymbolicLb( unsigned layerInde const double *NetworkLevelReasoner::getOutputLayerSymbolicUb( unsigned layerIndex ) const { - // Initialize map if empty - if ( _previousBiases.empty() ) + // Initialize map if empty. + if ( _outputLayerSymbolicUb.empty() ) { - const_cast( this )->initializeOutputLayerSymbolicBounds(); + const_cast( this )->initializeSymbolicBoundsMaps(); } if ( !_outputLayerSymbolicUb.exists( layerIndex ) ) @@ -828,10 +775,10 @@ const double *NetworkLevelReasoner::getOutputLayerSymbolicUb( unsigned layerInde const double *NetworkLevelReasoner::getOutputLayerSymbolicLowerBias( unsigned layerIndex ) const { - // Initialize map if empty - if ( _previousBiases.empty() ) + // Initialize map if empty. + if ( _outputLayerSymbolicLowerBias.empty() ) { - const_cast( this )->initializeOutputLayerSymbolicBounds(); + const_cast( this )->initializeSymbolicBoundsMaps(); } if ( !_outputLayerSymbolicLowerBias.exists( layerIndex ) ) @@ -844,10 +791,10 @@ const double *NetworkLevelReasoner::getOutputLayerSymbolicLowerBias( unsigned la const double *NetworkLevelReasoner::getOutputLayerSymbolicUpperBias( unsigned layerIndex ) const { - // Initialize map if empty - if ( _previousBiases.empty() ) + // Initialize map if empty. 
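// [Editorial sketch, inferred from the surrounding code, not stated in the patch]
// All of these symbolic-bound maps are populated together by
// initializeSymbolicBoundsMaps(), so each getter only needs to test the map it
// actually reads for emptiness before triggering the shared lazy initialization;
// the const_cast is confined to filling these caches and does not alter any
// observable bounds. (The patch changes the emptiness check from _previousBiases,
// an unrelated map, to the map being returned.)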
+ if ( _outputLayerSymbolicUpperBias.empty() ) { - const_cast( this )->initializeOutputLayerSymbolicBounds(); + const_cast( this )->initializeSymbolicBoundsMaps(); } if ( !_outputLayerSymbolicUpperBias.exists( layerIndex ) ) @@ -858,6 +805,72 @@ const double *NetworkLevelReasoner::getOutputLayerSymbolicUpperBias( unsigned la return _outputLayerSymbolicUpperBias[layerIndex]; } +const double *NetworkLevelReasoner::getSymbolicLbInTermsOfPredecessor( unsigned layerIndex ) const +{ + // Initialize map if empty. + if ( _symbolicLbInTermsOfPredecessor.empty() ) + { + const_cast( this )->initializeSymbolicBoundsMaps(); + } + + if ( !_symbolicLbInTermsOfPredecessor.exists( layerIndex ) ) + { + throw NLRError( NLRError::LAYER_NOT_FOUND, "Layer not found in symbolic bounds map." ); + } + + return _symbolicLbInTermsOfPredecessor[layerIndex]; +} + +const double *NetworkLevelReasoner::getSymbolicUbInTermsOfPredecessor( unsigned layerIndex ) const +{ + // Initialize map if empty. + if ( _symbolicUbInTermsOfPredecessor.empty() ) + { + const_cast( this )->initializeSymbolicBoundsMaps(); + } + + if ( !_symbolicUbInTermsOfPredecessor.exists( layerIndex ) ) + { + throw NLRError( NLRError::LAYER_NOT_FOUND, "Layer not found in symbolic bounds map." ); + } + + return _symbolicUbInTermsOfPredecessor[layerIndex]; +} + +const double * +NetworkLevelReasoner::getSymbolicLowerBiasInTermsOfPredecessor( unsigned layerIndex ) const +{ + // Initialize map if empty. + if ( _symbolicLowerBiasInTermsOfPredecessor.empty() ) + { + const_cast( this )->initializeSymbolicBoundsMaps(); + } + + if ( !_symbolicLowerBiasInTermsOfPredecessor.exists( layerIndex ) ) + { + throw NLRError( NLRError::LAYER_NOT_FOUND, "Layer not found in symbolic bounds map." ); + } + + return _symbolicLowerBiasInTermsOfPredecessor[layerIndex]; +} + +const double * +NetworkLevelReasoner::getSymbolicUpperBiasInTermsOfPredecessor( unsigned layerIndex ) const +{ + // Initialize map if empty. + if ( _symbolicUpperBiasInTermsOfPredecessor.empty() ) + { + const_cast( this )->initializeSymbolicBoundsMaps(); + } + + if ( !_symbolicUpperBiasInTermsOfPredecessor.exists( layerIndex ) ) + { + throw NLRError( NLRError::LAYER_NOT_FOUND, "Layer not found in symbolic bounds map." 
); + + return _symbolicUpperBiasInTermsOfPredecessor[layerIndex]; +} + unsigned NetworkLevelReasoner::mergeConsecutiveWSLayers( const Map &lowerBounds, const Map &upperBounds, @@ -1088,7 +1101,7 @@ double NetworkLevelReasoner::EstimateVolume( const Map &layer if ( outputLayer->neuronEliminated( j ) ) continue; - double margin = outputLayer->calculateDifferenceFromSymbolic( point, j ); + double margin = calculateDifferenceFromSymbolic( outputLayer, point, j ); max_margin = std::max( max_margin, margin ); } sigmoid_sum += SigmoidConstraint::sigmoid( max_margin ); @@ -1098,6 +1111,24 @@ double NetworkLevelReasoner::EstimateVolume( const Map &layer GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; } +double NetworkLevelReasoner::calculateDifferenceFromSymbolic( const Layer *layer, + Map &point, + unsigned i ) const +{ + unsigned size = layer->getSize(); + unsigned inputLayerSize = _layerIndexToLayer[0]->getSize(); + double lowerSum = layer->getSymbolicLowerBias()[i]; + double upperSum = layer->getSymbolicUpperBias()[i]; + + for ( unsigned j = 0; j < inputLayerSize; ++j ) + { + lowerSum += layer->getSymbolicLb()[j * size + i] * point[j]; + upperSum += layer->getSymbolicUb()[j * size + i] * point[j]; + } + + return std::max( layer->getUb( i ) - upperSum, lowerSum - layer->getLb( i ) ); +} + const Vector NetworkLevelReasoner::OptimizeParameterisedPolygonalTightening() { const Map &layers = getLayerIndexToLayer(); @@ -1241,9 +1272,17 @@ double NetworkLevelReasoner::getParameterisdPolygonalTighteningLowerBound( PolygonalTightening &tightening, Vector &prevTightenings ) { - // First, run parameterised symbolic bound propagation. - parameterisedSymbolicBoundPropagation( coeffs ); + // First, run parameterised symbolic bound propagation and compute successor layers. + Map> layerIndicesToParameters = + getParametersForLayers( _layerIndexToLayer, coeffs ); + for ( unsigned i = 0; i < _layerIndexToLayer.size(); ++i ) + { + const Vector &currentLayerCoeffs = layerIndicesToParameters[i]; + _layerIndexToLayer[i]->computeParameterisedSymbolicBounds( currentLayerCoeffs ); + } + computeSuccessorLayers(); + + // Recursively compute mu, mu_hat for every layer. 
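// [Editorial sketch of the recurrence below; inferred from the code, not
// authoritative] Walking the layers backwards, mu_hat[k][i] accumulates the
// contribution of every successor layer s of layer k:
//   - weighted-sum successor:  mu_hat[k][i] += sum_j w(i -> j) * mu[s][j],
//     where w(i -> j) is the weight from neuron i into successor neuron j;
//   - activation successor:    the positive part of mu[s][j] is pushed through
//     the stored symbolic upper bound in terms of the predecessor, and the
//     negative part through the stored symbolic lower bound, as in standard
//     back-substitution.
// mu[k] is then obtained from mu_hat[k] by folding in the tightening's own
// coefficients for the neurons of layer k.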
const unsigned numLayers = layers.size(); const unsigned maxLayer = layers.size() - 1; const unsigned numCoeffs = coeffs.size(); @@ -1252,6 +1291,7 @@ double NetworkLevelReasoner::getParameterisdPolygonalTighteningLowerBound( Vector> mu( numLayers ); Vector> mu_hat( numLayers ); + Set handledInputNeurons; for ( unsigned index = maxLayer; index > 0; --index ) { @@ -1264,18 +1304,19 @@ double NetworkLevelReasoner::getParameterisdPolygonalTighteningLowerBound( if ( layerIndex < maxLayer ) { - for ( const unsigned successorLayerIndex : layer->getSuccessorLayers() ) + for ( unsigned i = 0; i < layerSize; ++i ) { - const Layer *successorLayer = getLayer( successorLayerIndex ); - const unsigned successorLayerSize = successorLayer->getSize(); - const Layer::Type successorLayerType = successorLayer->getLayerType(); - - if ( successorLayerType == Layer::WEIGHTED_SUM ) + NeuronIndex neuron( layerIndex, i ); + for ( const unsigned successorLayerIndex : layer->getSuccessorLayers() ) { - const double *successorWeights = - successorLayer->getWeights( successorLayerIndex ); - for ( unsigned i = 0; i < layerSize; ++i ) + const Layer *successorLayer = getLayer( successorLayerIndex ); + const unsigned successorLayerSize = successorLayer->getSize(); + const Layer::Type successorLayerType = successorLayer->getLayerType(); + + if ( successorLayerType == Layer::WEIGHTED_SUM ) { + const double *successorWeights = successorLayer->getWeights( layerIndex ); + for ( unsigned j = 0; j < successorLayerSize; ++j ) { if ( !successorLayer->neuronEliminated( j ) ) @@ -1285,44 +1326,60 @@ double NetworkLevelReasoner::getParameterisdPolygonalTighteningLowerBound( } } } - } - - else - { - // const double *successorSymbolicLbInTermsOfPredecessor = - // successorLayer->getSymbolicLbInTermsOfPredecessor(); const double - // *successorSymbolicUbInTermsOfPredecessor = - // successorLayer->getSymbolicUbInTermsOfPredecessor(); - /*const double *successorSymbolicLbInTermsOfPredecessor = - successorLayer->getSymbolicLb(); - const double *successorSymbolicUbInTermsOfPredecessor = - successorLayer->getSymbolicUb(); - for ( unsigned i = 0; i < layerSize; ++i ) + else { + const double *successorSymbolicLbInTermsOfPredecessor = + _symbolicLbInTermsOfPredecessor[successorLayerIndex]; + const double *successorSymbolicUbInTermsOfPredecessor = + _symbolicUbInTermsOfPredecessor[successorLayerIndex]; for ( unsigned j = 0; j < successorLayerSize; ++j ) { - if ( !successorLayer->neuronEliminated( j ) ) + // Find the index of the current neuron in the activation source list. 
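// [Editorial note, inferred] predecessorIndex is the position of `neuron` in
// the successor neuron's ordered activation-source list. It selects a column
// of the stored per-predecessor symbolic bounds, matching the row-major
// convention of storePredecessorSymbolicBounds(): the coefficient for
// successor neuron j and its k-th source sits at offset
// j * successorLayerSize + k, so the row stride is the successor layer's size.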
+ unsigned predecessorIndex = 0; + bool found = false; + List sources = successorLayer->getActivationSources( j ); + for ( const auto &sourceIndex : sources ) { - if ( mu[successorLayerIndex][j] >= 0 ) + if ( sourceIndex != neuron ) { - mu_hat[index][i] += mu[successorLayerIndex][j] * - successorSymbolicUbInTermsOfPredecessor - [i * successorLayerSize + j]; + if ( !found ) + { + ++predecessorIndex; + } } else { - mu_hat[index][i] -= mu[successorLayerIndex][j] * - successorSymbolicLbInTermsOfPredecessor - [i * successorLayerSize + j]; + found = true; + } + } + + if ( found ) + { + if ( !successorLayer->neuronEliminated( j ) ) + { + if ( mu[successorLayerIndex][j] >= 0 ) + { + mu_hat[index][i] += + mu[successorLayerIndex][j] * + successorSymbolicUbInTermsOfPredecessor + [j * successorLayerSize + predecessorIndex]; + } + else + { + mu_hat[index][i] -= + mu[successorLayerIndex][j] * + successorSymbolicLbInTermsOfPredecessor + [j * successorLayerSize + predecessorIndex]; + } } } } - }*/ + } } } } - + // Compute mu from mu_hat. mu[index] += mu_hat[index]; for ( unsigned i = 0; i < layerSize; ++i ) { @@ -1343,26 +1400,27 @@ double NetworkLevelReasoner::getParameterisdPolygonalTighteningLowerBound( } } - Vector globalBoundVector( inputLayerSize, 0 ); + // Compute global bound for input space minimization problem. + Vector inputLayerBound( inputLayerSize, 0 ); for ( unsigned i = 0; i < inputLayerSize; ++i ) { - globalBoundVector[i] += tightening.getCoeff( NeuronIndex( 0, i ) ) - mu_hat[0][i]; + inputLayerBound[i] += tightening.getCoeff( NeuronIndex( 0, i ) ) - mu_hat[0][i]; for ( unsigned j = 0; j < numCoeffs; ++j ) { const PolygonalTightening pt = prevTightenings[j]; double prevCoeff = pt.getCoeff( NeuronIndex( 0, i ) ); if ( pt._type == PolygonalTightening::UB ) { - globalBoundVector[i] += gamma[j] * prevCoeff; + inputLayerBound[i] += gamma[j] * prevCoeff; } else { - globalBoundVector[i] -= gamma[j] * prevCoeff; + inputLayerBound[i] -= gamma[j] * prevCoeff; } } } - + // Compute lower bound for polygonal tightening bias using mu and inputLayerBound. 
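// [Editorial sketch of the concretization below; inferred reading] The final
// bound collects three kinds of terms: (1) gamma-weighted biases of the
// previously chosen tightenings; (2) mu-weighted layer biases -- weighted-sum
// biases directly, and for activation layers the stored symbolic lower/upper
// biases in terms of the predecessor, selected by the sign of mu; and (3) the
// concretization of inputLayerBound over the input box:
//   lowerBound += inputLayerBound[i] * ( inputLayerBound[i] > 0 ? inputUbs[i] : inputLbs[i] )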
double lowerBound = 0; for ( unsigned i = 0; i < numGamma; ++i ) { @@ -1399,14 +1457,6 @@ double NetworkLevelReasoner::getParameterisdPolygonalTighteningLowerBound( } else { - /* // const double *successorSymbolicLowerBiasInTermsOfPredecessor = - // successorLayer->getSymbolicLowerBiasInTermsOfPredecessor(); const double - // *successorSymbolicUpperBiasInTermsOfPredecessor = - // successorLayer->getSymbolicUpperBiasInTermsOfPredecessor(); - const double *successorSymbolicLowerBiasInTermsOfPredecessor = - layer->getSymbolicLowerBias(); - const double *successorSymbolicUpperBiasInTermsOfPredecessor = - layer->getSymbolicUpperBias(); for ( unsigned i = 0; i < layerSize; ++i ) { if ( !layer->neuronEliminated( i ) ) @@ -1414,20 +1464,20 @@ double NetworkLevelReasoner::getParameterisdPolygonalTighteningLowerBound( if ( mu[index][i] > 0 ) { lowerBound -= - mu[index][i] * successorSymbolicUpperBiasInTermsOfPredecessor[i]; + mu[index][i] * _symbolicUpperBiasInTermsOfPredecessor[index][i]; } else { lowerBound += - mu[index][i] * successorSymbolicLowerBiasInTermsOfPredecessor[i]; + mu[index][i] * _symbolicLowerBiasInTermsOfPredecessor[index][i]; } } else { - lowerBound += + lowerBound -= FloatUtils::abs( mu[index][i] ) * layer->getEliminatedNeuronValue( i ); } - }*/ + } } } @@ -1436,22 +1486,22 @@ double NetworkLevelReasoner::getParameterisdPolygonalTighteningLowerBound( const double *inputUbs = inputLayer->getUbs(); for ( unsigned i = 0; i < inputLayerSize; ++i ) { - if ( globalBoundVector[i] > 0 ) + if ( inputLayerBound[i] > 0 ) { - lowerBound += globalBoundVector[i] * inputUbs[i]; + lowerBound += inputLayerBound[i] * inputUbs[i]; } else { - lowerBound += globalBoundVector[i] * inputLbs[i]; + lowerBound += inputLayerBound[i] * inputLbs[i]; } } return lowerBound; } const Vector -NetworkLevelReasoner::generatePolygonalTightenings( const Map &layers ) const +NetworkLevelReasoner::generatePolygonalTightenings( const Map &layers ) { - /*Vector tightenings = Vector( {} ); + Vector tightenings = Vector( {} ); if ( Options::get()->getMILPSolverBoundTighteningType() == MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM || Options::get()->getMILPSolverBoundTighteningType() == @@ -1459,106 +1509,124 @@ NetworkLevelReasoner::generatePolygonalTightenings( const Map Options::get()->getMILPSolverBoundTighteningType() == MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS ) { + unsigned neuronCount = GlobalConfiguration::PMNR_SELECTED_NEURONS; + Vector lowerBoundTightenings = Vector( {} ); + Vector upperBoundTightenings = Vector( {} ); const List constraints = selectConstraints( layers ); for ( const auto &pair : constraints ) { - unsigned layerIndex = constraints._layer; - unsigned neuron = constraints._neuron; + unsigned layerIndex = pair._layer; + unsigned neuron = pair._neuron; Layer *layer = layers[layerIndex]; + unsigned layerSize = layer->getSize(); - Map neuronToCoefficient = Map ( {} - ); + Map neuronToLowerCoefficient = Map( {} ); + Map neuronToUpperCoefficient = Map( {} ); + neuronToLowerCoefficient[pair] = -1; + neuronToUpperCoefficient[pair] = -1; - if ( layer->getLayerType() == NetworkLevelReasoner::WEIGHTED_SUM ) + List sources = layer->getActivationSources( neuron ); + unsigned predecessorIndex = 0; + for ( const auto &sourceIndex : sources ) { - const double *biases = layer->getBiases(); - for ( unsigned i = 0; i < layerSize; ++i ) - { - if ( !layer->neuronEliminated( i ) ) - { - lowerBound -= mu[index][i] * biases[i]; - } - else - { - lowerBound -= mu[index][i] * 
layer->getEliminatedNeuronValue[i]; - } - } + neuronToLowerCoefficient[sourceIndex] = + _symbolicLbInTermsOfPredecessor[layerIndex] + [neuron * layerSize + predecessorIndex]; + neuronToUpperCoefficient[sourceIndex] = + _symbolicUbInTermsOfPredecessor[layerIndex] + [neuron * layerSize + predecessorIndex]; + ++predecessorIndex; + } + PolygonalTightening lowerTightening( + neuronToLowerCoefficient, 0, PolygonalTightening::LB ); + PolygonalTightening upperTightening( + neuronToUpperCoefficient, 0, PolygonalTightening::UB ); + lowerBoundTightenings.append( lowerTightening ); + upperBoundTightenings.append( upperTightening ); } - else + + int range = ( 1 << neuronCount ) - 1; + for ( int i = 0; i < range; ++i ) { - const double *successorSymbolicLowerBiasInTermsOfPredecessor = - successorLayer->getSymbolicLbInTermsOfPredecessor(); const double - *successorSymbolicUpperBiasInTermsOfPredecessor = - successorLayer->getSymbolicUbInTermsOfPredecessor(); for ( unsigned i = 0; i < layerSize; ++i ) + Map neuronToLowerCoefficient = Map( {} ); + Map neuronToUpperCoefficient = Map( {} ); + for ( unsigned j = 0; j < neuronCount; ++j ) { - if ( !layer->neuronEliminated( i ) ) + int flag = ( i >> j ) % 2; + if ( flag > 0 ) { - if ( mu[index][i] > 0 ) + for ( const auto &pair : lowerBoundTightenings[j]._neuronToCoefficient ) { - lowerBound -= mu[index][i] * - successorSymbolicUpperBiasInTermsOfPredecessor[i]; + neuronToLowerCoefficient[pair.first] = pair.second; } - else + for ( const auto &pair : upperBoundTightenings[j]._neuronToCoefficient ) { - lowerBound += mu[index][i] * - successorSymbolicLowerBiasInTermsOfPredecessor[i]; + neuronToUpperCoefficient[pair.first] = pair.second; } } - else - { - lowerBound += FloatUtils::abs( mu[index][i] ) * - layer->getEliminatedNeuronValue[i]; - } } + PolygonalTightening lowerTightening( + neuronToLowerCoefficient, 0, PolygonalTightening::LB ); + PolygonalTightening upperTightening( + neuronToUpperCoefficient, 0, PolygonalTightening::UB ); + tightenings.append( lowerTightening ); + tightenings.append( upperTightening ); } } else if ( Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP ) + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP ) { - - }*/ - unsigned n = layers.size(); - n = n; - const Vector defaultVector = Vector( {} ); - return defaultVector; + for ( const auto &pair : layers ) + { + unsigned layerIndex = pair.first; + Layer *layer = pair.second; + const Vector nonFixedNeurons = getNonFixedNeurons( layer ); + for ( const unsigned neuron : nonFixedNeurons ) + { + NeuronIndex index( layerIndex, neuron ); + Map neuronToLowerCoefficient = Map( {} ); + Map neuronToUpperCoefficient = Map( {} ); + neuronToLowerCoefficient[index] = 1; + neuronToUpperCoefficient[index] = 1; + PolygonalTightening lowerTightening( + neuronToLowerCoefficient, layer->getUb( neuron ), PolygonalTightening::LB ); + PolygonalTightening upperTightening( + neuronToUpperCoefficient, layer->getLb( neuron ), PolygonalTightening::UB ); + tightenings.append( lowerTightening ); + tightenings.append( upperTightening ); + } + } + } + const Vector tighteningsVector = + Vector( tightenings ); + return tighteningsVector; } const List -NetworkLevelReasoner::selectConstraints( const Map &layers ) const +NetworkLevelReasoner::selectConstraints( const Map &layers ) { + // ... 
List neuronList = List( {} ); + unsigned neuronCount = GlobalConfiguration::PMNR_SELECTED_NEURONS; switch ( Options::get()->getMILPSolverBoundTighteningType() ) { case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM: { - unsigned numNonlinear = countNonlinearLayers( layers ); + Vector candidateLayers = getLayersWithNonFixedNeurons( layers ); std::mt19937_64 rng( GlobalConfiguration::PMNR_RANDOM_SEED ); - std::uniform_int_distribution dis_layer( 0, numNonlinear - 1 ); - unsigned nonlinearIndex = dis_layer( rng ); - - unsigned index = 0; - while ( index != nonlinearIndex ) - { - Layer::Type type = _layerIndexToLayer[index]->getLayerType(); - - if ( type != Layer::WEIGHTED_SUM ) - { - ++index; - } - } + std::uniform_int_distribution dis_layer( 0, candidateLayers.size() - 1 ); + unsigned entry = dis_layer( rng ); + unsigned index = candidateLayers[entry]; Layer *layer = _layerIndexToLayer[index]; - unsigned layerSize = layer->getSize(); - std::uniform_int_distribution dis_neuron( 0, layerSize - 1 ); - for ( unsigned i = 0; i < GlobalConfiguration::PMNR_SELECTED_NEURONS; ++i ) + Vector candidateNeurons = getNonFixedNeurons( layer ); + std::uniform_int_distribution dis_neuron( 0, candidateNeurons.size() - 1 ); + for ( unsigned i = 0; i < neuronCount; ++i ) { - unsigned neuron = dis_neuron( rng ); - while ( layer->neuronEliminated( neuron ) ) - { - neuron = dis_neuron( rng ); - } + unsigned entry = dis_neuron( rng ); + unsigned neuron = candidateNeurons[entry]; neuronList.append( NeuronIndex( index, neuron ) ); } break; @@ -1566,6 +1634,8 @@ NetworkLevelReasoner::selectConstraints( const Map &layers ) case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT: { + initializeSymbolicBoundsMaps(); + double maxScore = 0; unsigned maxScoreIndex = 0; Map neuronIndexToScore; @@ -1576,14 +1646,14 @@ NetworkLevelReasoner::selectConstraints( const Map &layers ) Layer *layer = pair.second; unsigned layerSize = layer->getSize(); - if ( layer->getLayerType() == Layer::WEIGHTED_SUM ) + if ( getNonFixedNeurons( layer ).size() == 0 ) { continue; } for ( unsigned i = 0; i < layerSize; ++i ) { - if ( !( layer->neuronEliminated( i ) ) ) + if ( isNeuronNonFixed( layer, i ) ) { double neuronScore = std::pow( ( _outputLayerSymbolicLb[index][i] + _outputLayerSymbolicUb[index][i] ) / @@ -1609,14 +1679,14 @@ NetworkLevelReasoner::selectConstraints( const Map &layers ) max_priority_queue; for ( unsigned i = 0; i < layerSize; ++i ) { - if ( !( layer->neuronEliminated( i ) ) ) + if ( isNeuronNonFixed( layer, i ) ) { double neuronScore = neuronIndexToScore[NeuronIndex( maxScoreIndex, i )]; max_priority_queue.push( std::pair( neuronScore, i ) ); } } - for ( unsigned i = 0; i < GlobalConfiguration::PMNR_SELECTED_NEURONS; ++i ) + for ( unsigned i = 0; i < neuronCount; ++i ) { unsigned neuron = max_priority_queue.top().second; neuronList.append( NeuronIndex( maxScoreIndex, neuron ) ); @@ -1627,7 +1697,9 @@ NetworkLevelReasoner::selectConstraints( const Map &layers ) case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS: { - double maxScore = 0; + initializeSymbolicBoundsMaps(); + + /*double maxScore = 0; unsigned maxScoreIndex = 0; Map neuronIndexToScore; for ( const auto &pair : _layerIndexToLayer ) @@ -1635,14 +1707,17 @@ NetworkLevelReasoner::selectConstraints( const Map &layers ) double score = 0; unsigned index = pair.first; Layer *layer = pair.second; - // unsigned layerSize = layer->getSize(); + unsigned layerSize = layer->getSize(); + unsigned inputLayerSize = 
_layerIndexToLayer[0]->getSize(); + unsigned outputLayerSize = _layerIndexToLayer[getNumberOfLayers() - 1]->getSize(); + Vector outputLayerScores( outputLayerSize, 0 ); - if ( layer->getLayerType() == Layer::WEIGHTED_SUM ) + if ( getNonFixedNeurons( layer ).size() == 0 ) { continue; } - /*double *Lbs = layer->getLbs(); + double *Lbs = layer->getLbs(); double *Ubs = layer->getUbs(); double *symbolicLb = layer->getSymbolicLb(); double *symbolicUb = layer->getSymbolicUb(); @@ -1650,22 +1725,39 @@ NetworkLevelReasoner::selectConstraints( const Map &layers ) double *symbolicUpperBias = layer->getSymbolicUpperBias(); for ( unsigned i = 0; i < layerSize; ++i ) { - if - ( !( layer->neuronEliminated( i ) ) ) + + + + for ( unsigned j = 0; j < outputLayerSize; j++ ) { - double neuronScore = _outputLayerSymbolicLowerBias[i]; + outputLayerScores[j] = _outputLayerSymbolicLowerBias[j]; + if ( _outputLayerSymbolicLb[index][i] > 0 ) { - neuronScore += std::max( 0, _outputLayerSymbolicLb[index][i] ) * - symbolicLb[i] + std::min( 0, _outputLayerSymbolicLb[index][i] ) * - symbolicUb[i] + outputLayerScores[j] += _outputLayerSymbolicLb[index][i] * + symbolicLb[i]; } + else + { + outputLayerScores[j] += _outputLayerSymbolicLb[index][i] * + symbolicUb[i]; + } + + for ( unsigned k = 0; k < inputLayerSize; k++ ) + { - std::pow( ( _outputLayerSymbolicLb[index][i] + - _outputLayerSymbolicUb[index][i] ) / 2.0, 2 ); neuronIndexToScore.insert( NeuronIndex( - index, i ), neuronScore ); score += neuronScore; + } } - }*/ + + for ( unsigned j = 0; j < outputLayerSize; j++ ) + { + neuronScore += std::pow( outputLayerScores[j], 2 ); + } + + neuronIndexToScore.insert( NeuronIndex( index, i ), neuronScore ); + score += neuronScore; + } + } if ( score > maxScore ) { @@ -1682,20 +1774,20 @@ NetworkLevelReasoner::selectConstraints( const Map &layers ) max_priority_queue; for ( unsigned i = 0; i < layerSize; ++i ) { - if ( !( layer->neuronEliminated( i ) ) ) + if ( isNeuronNonFixed( layer, i ) ) { double neuronScore = neuronIndexToScore[NeuronIndex( maxScoreIndex, i )]; max_priority_queue.push( std::pair( neuronScore, i ) ); } } - for ( unsigned i = 0; i < GlobalConfiguration::PMNR_SELECTED_NEURONS; ++i ) + for ( unsigned i = 0; i < neuronCount; ++i ) { unsigned neuron = max_priority_queue.top().second; neuronList.append( NeuronIndex( maxScoreIndex, neuron ) ); max_priority_queue.pop(); } - break; + break;*/ } default: @@ -1706,16 +1798,36 @@ NetworkLevelReasoner::selectConstraints( const Map &layers ) return list; } +double NetworkLevelReasoner::getBranchingPoint( Layer *layer, unsigned neuron ) const +{ + ASSERT( isNeuronNonFixed( layer, neuron ) ); + double lb = layer->getLb( neuron ); + double ub = layer->getUb( neuron ); + + double branchingPoint; + if ( layer->getLayerType() == Layer::RELU || layer->getLayerType() == Layer::LEAKY_RELU || + layer->getLayerType() == Layer::SIGN || layer->getLayerType() == Layer::ABSOLUTE_VALUE ) + { + branchingPoint = 0; + } + else + { + branchingPoint = ( lb + ub ) / 2.0; + } + + return branchingPoint; +} + const Map> NetworkLevelReasoner::getParametersForLayers( const Map &layers, const Vector &coeffs ) const { unsigned index = 0; Map> layerIndicesToParameters; - for ( auto pair : layers ) + for ( const auto &pair : layers ) { unsigned layerIndex = pair.first; - Layer *layer = layers[layerIndex]; + Layer *layer = pair.second; unsigned n_coeffs = getNumberOfParametersPerType( layer->getLayerType() ); Vector current_coeffs( n_coeffs ); for ( unsigned i = 0; i < n_coeffs; ++i ) @@ -1732,10 
+1844,9 @@ NetworkLevelReasoner::getParametersForLayers( const Map &laye unsigned NetworkLevelReasoner::getNumberOfParameters( const Map &layers ) const { unsigned num = 0; - for ( auto pair : layers ) + for ( const auto &pair : layers ) { - unsigned layerIndex = pair.first; - Layer *layer = layers[layerIndex]; + Layer *layer = pair.second; num += getNumberOfParametersPerType( layer->getLayerType() ); } return num; @@ -1752,6 +1863,203 @@ unsigned NetworkLevelReasoner::getNumberOfParametersPerType( Layer::Type t ) con return 0; } +const Vector +NetworkLevelReasoner::getLayersWithNonFixedNeurons( const Map &layers ) const +{ + Vector layerWithNonFixedNeurons = Vector( {} ); + for ( const auto &pair : layers ) + { + unsigned layerIndex = pair.first; + Layer *layer = pair.second; + const Vector nonFixedNeurons = getNonFixedNeurons( layer ); + if ( nonFixedNeurons.size() > 0 ) + { + layerWithNonFixedNeurons.append( layerIndex ); + } + } + const Vector layerList = Vector( layerWithNonFixedNeurons ); + return layerList; +} + +const Vector NetworkLevelReasoner::getNonFixedNeurons( Layer *layer ) const +{ + Vector nonFixedNeurons = Vector( {} ); + unsigned size = layer->getSize(); + for ( unsigned i = 0; i < size; ++i ) + { + if ( isNeuronNonFixed( layer, i ) ) + { + nonFixedNeurons.append( i ); + } + } + const Vector neuronList = Vector( nonFixedNeurons ); + return neuronList; +} + +bool NetworkLevelReasoner::isNeuronNonFixed( Layer *layer, unsigned neuron ) const +{ + if ( layer->neuronEliminated( neuron ) ) + { + return false; + } + + bool nonFixed = false; + switch ( layer->getLayerType() ) + { + case Layer::RELU: + case Layer::LEAKY_RELU: + { + double lb = layer->getLb( neuron ); + double ub = layer->getUb( neuron ); + nonFixed = !FloatUtils::isPositive( lb ) && !FloatUtils::isZero( ub ); + break; + } + case Layer::SIGN: + { + double lb = layer->getLb( neuron ); + double ub = layer->getUb( neuron ); + nonFixed = FloatUtils::isNegative( lb ) && !FloatUtils::isNegative( ub ); + break; + } + case Layer::ABSOLUTE_VALUE: + { + NeuronIndex sourceIndex = *layer->getActivationSources( neuron ).begin(); + const Layer *sourceLayer = getLayer( sourceIndex._layer ); + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); + nonFixed = sourceLb < 0 && sourceUb > 0; + break; + } + case Layer::SIGMOID: + { + NeuronIndex sourceIndex = *layer->getActivationSources( neuron ).begin(); + const Layer *sourceLayer = getLayer( sourceIndex._layer ); + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); + nonFixed = !FloatUtils::areEqual( sourceLb, sourceUb ); + break; + } + case Layer::ROUND: + { + NeuronIndex sourceIndex = *layer->getActivationSources( neuron ).begin(); + const Layer *sourceLayer = getLayer( sourceIndex._layer ); + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); + nonFixed = + !FloatUtils::areEqual( FloatUtils::round( sourceUb ), FloatUtils::round( sourceLb ) ); + break; + } + case Layer::MAX: + { + List sources = layer->getActivationSources( neuron ); + const Layer *sourceLayer = getLayer( sources.begin()->_layer ); + NeuronIndex indexOfMaxLowerBound = *( sources.begin() ); + double maxLowerBound = FloatUtils::negativeInfinity(); + double maxUpperBound = FloatUtils::negativeInfinity(); + + Map sourceUbs; + for ( const auto &sourceIndex : sources ) + { + unsigned sourceNeuron = 
sourceIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + sourceUbs[sourceIndex] = sourceUb; + if ( maxLowerBound < sourceLb ) + { + indexOfMaxLowerBound = sourceIndex; + maxLowerBound = sourceLb; + } + if ( maxUpperBound < sourceUb ) + { + maxUpperBound = sourceUb; + } + } + + bool phaseFixed = true; + for ( const auto &sourceIndex : sources ) + { + if ( sourceIndex != indexOfMaxLowerBound && + FloatUtils::gt( sourceUbs[sourceIndex], maxLowerBound ) ) + { + phaseFixed = false; + break; + } + } + nonFixed = !phaseFixed; + break; + } + case Layer::SOFTMAX: + { + List sources = layer->getActivationSources( neuron ); + const Layer *sourceLayer = getLayer( sources.begin()->_layer ); + Vector sourceLbs; + Vector sourceUbs; + Set handledInputNeurons; + for ( const auto &sourceIndex : sources ) + { + unsigned sourceNeuron = sourceIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + } + + unsigned index = 0; + for ( const auto &sourceIndex : sources ) + { + if ( handledInputNeurons.exists( sourceIndex._neuron ) ) + ++index; + else + { + handledInputNeurons.insert( sourceIndex._neuron ); + break; + } + } + + double lb = std::max( layer->getLb( neuron ), + Layer::linearLowerBound( sourceLbs, sourceUbs, index ) ); + double ub = std::min( layer->getUb( neuron ), + Layer::linearUpperBound( sourceLbs, sourceUbs, index ) ); + nonFixed = !FloatUtils::areEqual( lb, ub ); + break; + } + case Layer::BILINEAR: + { + List sources = layer->getActivationSources( neuron ); + const Layer *sourceLayer = getLayer( sources.begin()->_layer ); + bool eitherConstant = false; + for ( const auto &sourceIndex : sources ) + { + unsigned sourceNeuron = sourceIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + if ( sourceLayer->neuronEliminated( sourceNeuron ) ) + { + eitherConstant = true; + } + if ( FloatUtils::areEqual( sourceLb, sourceUb ) ) + { + eitherConstant = true; + } + } + nonFixed = !eitherConstant; + break; + } + case Layer::WEIGHTED_SUM: + case Layer::INPUT: + { + nonFixed = false; + break; + } + default: + { + nonFixed = false; + break; + } + } + return nonFixed; +} + unsigned NetworkLevelReasoner::countNonlinearLayers( const Map &layers ) const { unsigned num = 0; @@ -1764,9 +2072,9 @@ unsigned NetworkLevelReasoner::countNonlinearLayers( const Map( new DeepPolyAnalysis( this, + true, true, &_outputLayerSymbolicLb, &_outputLayerSymbolicUb, &_outputLayerSymbolicLowerBias, - &_outputLayerSymbolicUpperBias ) ); + &_outputLayerSymbolicUpperBias, + &_symbolicLbInTermsOfPredecessor, + &_symbolicUbInTermsOfPredecessor, + &_symbolicLowerBiasInTermsOfPredecessor, + &_symbolicUpperBiasInTermsOfPredecessor ) ); _deepPolyAnalysis->run(); // Remove new weighted sum layer. 
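As a usage illustration of the per-predecessor accessors introduced above, here is a minimal editorial sketch. It is not part of the patch: the driver function and its output format are hypothetical, while the NetworkLevelReasoner accessors are the ones this patch adds, and an element-wise activation layer (single coefficient per neuron, stored in column 0 of row i with row stride equal to the layer size, per storePredecessorSymbolicBounds()) is assumed.

#include <cstdio>

// Prints, for each neuron i of an element-wise activation layer, the linear
// relaxation  symLb[i]*pred + lbBias[i] <= out_i <= symUb[i]*pred + ubBias[i]
// recovered from the maps populated during the DeepPoly run.
void printPredecessorRelaxations( NLR::NetworkLevelReasoner &nlr, unsigned layerIndex )
{
    const NLR::Layer *layer = nlr.getLayer( layerIndex );
    unsigned size = layer->getSize();
    const double *symLb = nlr.getSymbolicLbInTermsOfPredecessor( layerIndex );
    const double *symUb = nlr.getSymbolicUbInTermsOfPredecessor( layerIndex );
    const double *lbBias = nlr.getSymbolicLowerBiasInTermsOfPredecessor( layerIndex );
    const double *ubBias = nlr.getSymbolicUpperBiasInTermsOfPredecessor( layerIndex );

    for ( unsigned i = 0; i < size; ++i )
        printf( "neuron %u: %f * pred + %f <= out <= %f * pred + %f\n",
                i, symLb[i * size], lbBias[i], symUb[i * size], ubBias[i] );
}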
@@ -1808,34 +2121,51 @@ void NetworkLevelReasoner::initializeOutputLayerSymbolicBounds() } } -void NetworkLevelReasoner::allocateMemoryForOutputLayerSBTIfNeeded() +void NetworkLevelReasoner::allocateMemoryForSymbolicBoundMapsIfNeeded() { unsigned outputLayerSize = _layerIndexToLayer[getNumberOfLayers() - 1]->getSize(); - + unsigned maxLayerSize = getMaxLayerSize(); for ( const auto &pair : _layerIndexToLayer ) { unsigned layerIndex = pair.first; Layer *layer = pair.second; unsigned layerSize = layer->getSize(); - double *currentSymbolicLb = new double[layerSize * outputLayerSize]; - double *currentSymbolicUb = new double[layerSize * outputLayerSize]; - double *currentSymbolicLowerBias = new double[outputLayerSize]; - double *currentSymbolicUpperBias = new double[outputLayerSize]; + double *currentOutputSymbolicLb = new double[layerSize * outputLayerSize]; + double *currentOutputSymbolicUb = new double[layerSize * outputLayerSize]; + double *currentOutputSymbolicLowerBias = new double[outputLayerSize]; + double *currentOutputSymbolicUpperBias = new double[outputLayerSize]; + + double *currentPredecessorSymbolicLb = new double[maxLayerSize * layerSize]; + double *currentPredecessorSymbolicUb = new double[maxLayerSize * layerSize]; + double *currentPredecessorSymbolicLowerBias = new double[layerSize]; + double *currentPredecessorSymbolicUpperBias = new double[layerSize]; + + std::fill_n( currentOutputSymbolicLb, layerSize * outputLayerSize, 0 ); + std::fill_n( currentOutputSymbolicUb, layerSize * outputLayerSize, 0 ); + std::fill_n( currentOutputSymbolicLowerBias, outputLayerSize, 0 ); + std::fill_n( currentOutputSymbolicUpperBias, outputLayerSize, 0 ); - std::fill_n( currentSymbolicLb, layerSize * outputLayerSize, 0 ); - std::fill_n( currentSymbolicUb, layerSize * outputLayerSize, 0 ); - std::fill_n( currentSymbolicLowerBias, outputLayerSize, 0 ); - std::fill_n( currentSymbolicUpperBias, outputLayerSize, 0 ); + std::fill_n( currentPredecessorSymbolicLb, maxLayerSize * layerSize, 0 ); + std::fill_n( currentPredecessorSymbolicUb, maxLayerSize * layerSize, 0 ); + std::fill_n( currentPredecessorSymbolicLowerBias, layerSize, 0 ); + std::fill_n( currentPredecessorSymbolicUpperBias, layerSize, 0 ); - _outputLayerSymbolicLb.insert( layerIndex, currentSymbolicLb ); - _outputLayerSymbolicUb.insert( layerIndex, currentSymbolicUb ); - _outputLayerSymbolicLowerBias.insert( layerIndex, currentSymbolicLowerBias ); - _outputLayerSymbolicUpperBias.insert( layerIndex, currentSymbolicUpperBias ); + _outputLayerSymbolicLb.insert( layerIndex, currentOutputSymbolicLb ); + _outputLayerSymbolicUb.insert( layerIndex, currentOutputSymbolicUb ); + _outputLayerSymbolicLowerBias.insert( layerIndex, currentOutputSymbolicLowerBias ); + _outputLayerSymbolicUpperBias.insert( layerIndex, currentOutputSymbolicUpperBias ); + + _symbolicLbInTermsOfPredecessor.insert( layerIndex, currentPredecessorSymbolicLb ); + _symbolicUbInTermsOfPredecessor.insert( layerIndex, currentPredecessorSymbolicUb ); + _symbolicLowerBiasInTermsOfPredecessor.insert( layerIndex, + currentPredecessorSymbolicLowerBias ); + _symbolicUpperBiasInTermsOfPredecessor.insert( layerIndex, + currentPredecessorSymbolicUpperBias ); } } -void NetworkLevelReasoner::freeMemoryForOutputLayerSBTIfNeeded() +void NetworkLevelReasoner::freeMemoryForSymbolicBoundMapsIfNeeded() { for ( auto &pair : _outputLayerSymbolicLb ) { @@ -1876,6 +2206,46 @@ void NetworkLevelReasoner::freeMemoryForOutputLayerSBTIfNeeded() } } _outputLayerSymbolicUpperBias.clear(); + + for ( auto &pair 
: _symbolicLbInTermsOfPredecessor ) + { + if ( pair.second ) + { + delete[] pair.second; + pair.second = NULL; + } + } + _symbolicLbInTermsOfPredecessor.clear(); + + for ( auto &pair : _symbolicUbInTermsOfPredecessor ) + { + if ( pair.second ) + { + delete[] pair.second; + pair.second = NULL; + } + } + _symbolicUbInTermsOfPredecessor.clear(); + + for ( auto &pair : _symbolicLowerBiasInTermsOfPredecessor ) + { + if ( pair.second ) + { + delete[] pair.second; + pair.second = NULL; + } + } + _symbolicLowerBiasInTermsOfPredecessor.clear(); + + for ( auto &pair : _symbolicUpperBiasInTermsOfPredecessor ) + { + if ( pair.second ) + { + delete[] pair.second; + pair.second = NULL; + } + } + _symbolicUpperBiasInTermsOfPredecessor.clear(); } void NetworkLevelReasoner::mergeWSLayers( unsigned secondLayerIndex, diff --git a/src/nlr/NetworkLevelReasoner.h b/src/nlr/NetworkLevelReasoner.h index 045b61300c..6b3b78afb1 100644 --- a/src/nlr/NetworkLevelReasoner.h +++ b/src/nlr/NetworkLevelReasoner.h @@ -147,8 +147,7 @@ class NetworkLevelReasoner : public LayerOwner void MILPTighteningForOneLayer( unsigned targetIndex ); void iterativePropagation(); - - void initializeOutputLayerSymbolicBounds(); + void initializeSymbolicBoundsMaps(); // Return optimizable parameters which minimize parameterised SBT bounds' volume. const Vector<double> OptimalParameterisedSymbolicBoundTightening(); @@ -208,12 +207,16 @@ class NetworkLevelReasoner : public LayerOwner */ double getPreviousBias( const ReluConstraint *reluConstraint ) const; - const double *getOutputLayerSymbolicLb( unsigned layerIndex ) const; const double *getOutputLayerSymbolicUb( unsigned layerIndex ) const; const double *getOutputLayerSymbolicLowerBias( unsigned layerIndex ) const; const double *getOutputLayerSymbolicUpperBias( unsigned layerIndex ) const; + const double *getSymbolicLbInTermsOfPredecessor( unsigned layerIndex ) const; + const double *getSymbolicUbInTermsOfPredecessor( unsigned layerIndex ) const; + const double *getSymbolicLowerBiasInTermsOfPredecessor( unsigned layerIndex ) const; + const double *getSymbolicUpperBiasInTermsOfPredecessor( unsigned layerIndex ) const; + /* Finds logically consecutive WS layers and merges them, in order to reduce the total number of layers and variables in the @@ -249,8 +252,8 @@ class NetworkLevelReasoner : public LayerOwner void freeMemoryIfNeeded(); // Manage memory for symbolic bounds maps. - void allocateMemoryForOutputLayerSBTIfNeeded(); - void freeMemoryForOutputLayerSBTIfNeeded(); + void allocateMemoryForSymbolicBoundMapsIfNeeded(); + void freeMemoryForSymbolicBoundMapsIfNeeded(); List<PiecewiseLinearConstraint *> _constraintsInTopologicalOrder; @@ -288,8 +291,6 @@ class NetworkLevelReasoner : public LayerOwner // Estimate Volume of parameterised symbolic bound tightening. double EstimateVolume( const Map<unsigned, Layer *> &layers, const Vector<double> &coeffs ); - - void optimizeBoundsWithInvprop( Map<unsigned, Layer *> &layers, LPFormulator &lpFormulator ); - void optimizeBoundsWithPMNR( Map<unsigned, Layer *> &layers, LPFormulator &lpFormulator ); // Optimize biases of generated parameterised polygonal tightenings. @@ -307,10 +308,18 @@ class NetworkLevelReasoner : public LayerOwner Vector<PolygonalTightening> &prevTightenings ); const Vector<PolygonalTightening> - generatePolygonalTightenings( const Map<unsigned, Layer *> &layers ) const; + generatePolygonalTightenings( const Map<unsigned, Layer *> &layers ); // Heuristically select neurons and polygonal tightenings for PMNR. - const List<NeuronIndex> selectConstraints( const Map<unsigned, Layer *> &layers ) const; + const List<NeuronIndex> selectConstraints( const Map<unsigned, Layer *> &layers ); + + // Return the difference between a given point and the upper and lower bounds determined by + // the parameterised SBT relaxation. + double calculateDifferenceFromSymbolic( const Layer *layer, + Map<unsigned, double> &point, + unsigned i ) const; + + double getBranchingPoint( Layer *layer, unsigned neuron ) const; // Get map containing vector of optimizable parameters for parameterised SBT relaxation for // every layer index. @@ -324,6 +333,14 @@ class NetworkLevelReasoner : public LayerOwner // Get number of optimizable parameters for parameterised SBT relaxation per layer type. unsigned getNumberOfParametersPerType( Layer::Type t ) const; + // Get all indices of active non-weighted sum layers for INVPROP. + const Vector<unsigned> + getLayersWithNonFixedNeurons( const Map<unsigned, Layer *> &layers ) const; + + const Vector<unsigned> getNonFixedNeurons( Layer *layer ) const; + + bool isNeuronNonFixed( Layer *layer, unsigned neuron ) const; + // Get total number of non-weighted sum layers for INVPROP. unsigned countNonlinearLayers( const Map<unsigned, Layer *> &layers ) const; @@ -345,6 +362,11 @@ class NetworkLevelReasoner : public LayerOwner Map<unsigned, double *> _outputLayerSymbolicUb; Map<unsigned, double *> _outputLayerSymbolicLowerBias; Map<unsigned, double *> _outputLayerSymbolicUpperBias; + + Map<unsigned, double *> _symbolicLbInTermsOfPredecessor; + Map<unsigned, double *> _symbolicUbInTermsOfPredecessor; + Map<unsigned, double *> _symbolicLowerBiasInTermsOfPredecessor; + Map<unsigned, double *> _symbolicUpperBiasInTermsOfPredecessor; }; } // namespace NLR diff --git a/src/nlr/tests/Test_DeepPolyAnalysis.h b/src/nlr/tests/Test_DeepPolyAnalysis.h index 77c4d11f1a..8e852e30e0 100644 --- a/src/nlr/tests/Test_DeepPolyAnalysis.h +++ b/src/nlr/tests/Test_DeepPolyAnalysis.h @@ -208,6 +208,64 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); for ( const auto &bound : expectedBounds ) TS_ASSERT( existsBound( bounds, bound ) ); + + + // Reset bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + // Invoke initializeSymbolicBoundsMaps + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + + List<Tightening> expectedBounds2( + { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening(
8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, 1, Tightening::LB ), Tightening( 10, 5.5, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 2, Tightening::UB ) + + } ); + + List<Tightening> bounds2; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds2 ) ); + + TS_ASSERT_EQUALS( expectedBounds2.size(), bounds2.size() ); + for ( const auto &bound : expectedBounds2 ) + TS_ASSERT( existsBound( bounds2, bound ) ); } void populateResidualNetwork1( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) @@ -290,7 +348,7 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeOutputLayerSymbolicBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); /* Input ranges: diff --git a/src/nlr/tests/Test_PMNR.h b/src/nlr/tests/Test_PMNR.h new file mode 100644 index 0000000000..ce4e723758 --- /dev/null +++ b/src/nlr/tests/Test_PMNR.h @@ -0,0 +1,11387 @@ +/********************* */ +/*! \file Test_PMNR.h + ** \verbatim + ** Top contributors (to current version): + ** Guy Katz, Andrew Wu + ** This file is part of the Marabou project. + ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS + ** in the top-level source directory) and their institutional affiliations. + ** All rights reserved. See the file COPYING in the top-level source + ** directory for licensing information.\endverbatim + ** + ** [[ Add lengthier description here ]] + +**/ + +#include "../../engine/tests/MockTableau.h" // TODO: fix this +#include "DeepPolySoftmaxElement.h" +#include "FloatUtils.h" +#include "Layer.h" +#include "NetworkLevelReasoner.h" +#include "Options.h" +#include "Query.h" +#include "Tightening.h" +#include "Vector.h" + +#include <cxxtest/TestSuite.h> + +class MockForNetworkLevelReasoner +{ +public: +}; + +class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite +{ +public: + MockForNetworkLevelReasoner *mock; + + void setUp() + { + TS_ASSERT( mock = new MockForNetworkLevelReasoner ); + } + + void tearDown() + { + TS_ASSERT_THROWS_NOTHING( delete mock ); + } + + void populateNetworkBackwardReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 R -1 R -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ R / \ R / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig.
2 of + https://dl.acm.org/doi/10.1145/3563325 + using ReLU activation instead of LeakyReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardReLU2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = ReLU( x3 ) + x8 = ReLU( x4 ) + x9 = ReLU( x5 ) + x10 = ReLU( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = ReLU( x11 ) + x15 = ReLU( x12 ) + x16 = ReLU( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = ReLU( x17 ) + x20 = ReLU( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, 
NLR::Layer::RELU, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::RELU, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + 
tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardSigmoid( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 S -1 S -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ S / \ S / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 2 of + https://dl.acm.org/doi/10.1145/3563325 + using Sigmoid activation instead of LeakyReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the Sigmoid sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + 
tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardSigmoid2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = Sigmoid( x3 ) + x8 = Sigmoid( x4 ) + x9 = Sigmoid( x5 ) + x10 = Sigmoid( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = Sigmoid( x11 ) + x15 = Sigmoid( x12 ) + x16 = Sigmoid( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = Sigmoid( x17 ) + x20 = Sigmoid( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::SIGMOID, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::SIGMOID, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the Sigmoid sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( 
NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardSign( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 Sign -1 Sign -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ Sign / \ Sign / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 
2 of + https://dl.acm.org/doi/10.1145/3563325 + using Sign activation instead of LeakyReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardSign2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = Sign( x3 ) + x8 = Sign( x4 ) + x9 = Sign( x5 ) + x10 = Sign( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = Sign( x11 ) + x15 = Sign( x12 ) + x16 = Sign( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = Sign( x17 ) + x20 = Sign( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2,
NLR::Layer::SIGN, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::SIGN, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + 
tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardRound( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 Rnd -1 Rnd -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ Rnd / \ Rnd / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 2 of + https://dl.acm.org/doi/10.1145/3563325 + using Round activation instead of LeakyReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ROUND, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the Round sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + 
tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardRound2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = Round( x3 ) + x8 = Round( x4 ) + x9 = Round( x5 ) + x10 = Round( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = Round( x11 ) + x15 = Round( x12 ) + x16 = Round( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = Round( x17 ) + x20 = Round( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::ROUND, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::ROUND, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::ROUND, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the Round sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); 
+ + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardAbs( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 A -1 A -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ A / \ A / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 
2 of + https://dl.acm.org/doi/10.1145/3563325 + using Absolute value activation instead of LeakyReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardAbs2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = Abs( x3 ) + x8 = Abs( x4 ) + x9 = Abs( x5 ) + x10 = Abs( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = Abs( x11 ) + x15 = Abs( x12 ) + x16 = Abs( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = Abs( x17 ) + x20 = Abs( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + 
nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, 
-large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardLeakyReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 LR -1 LR -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ LR / \ LR / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 2 of + https://dl.acm.org/doi/10.1145/3563325 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + nlr.getLayer( 4 )->setAlpha( 0.1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the LeakyReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for 
neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardLeakyRelu2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = LeakyReLU( x3 ) + x8 = LeakyReLU( x4 ) + x9 = LeakyReLU( x5 ) + x10 = LeakyReLU( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = LeakyReLU( x11 ) + x15 = LeakyReLU( x12 ) + x16 = LeakyReLU( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = LeakyReLU( x17 ) + x20 = LeakyReLU( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + nlr.getLayer( 4 )->setAlpha( 0.1 ); + nlr.getLayer( 6 )->setAlpha( 0.1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the LeakyReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + 
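+ // ( Reference note: addActivationSource( sourceLayer, sourceNeuron, targetLayer, targetNeuron )
+ //   wires an activation neuron to the weighted-sum neuron it reads from; the
+ //   remaining calls below connect layer 5 to the final LeakyReLU layer 6.
+ //   As a hand-computed sanity check for this network, not asserted anywhere:
+ //   with ( x0, x1, x2 ) = ( 0, 0, 1 ) and alpha = 0.1, we get
+ //   ( x3, x4, x5, x6 ) = ( 0, 0, 1, -1 ), ( x7, x8, x9, x10 ) = ( 0, 0, 1, -0.1 ),
+ //   ( x11, x12, x13 ) = ( 3.1, -0.1, 0 ), ( x14, x15, x16 ) = ( 3.1, -0.01, 0 ),
+ //   ( x17, x18 ) = ( -3.11, 3.09 ), ( x19, x20 ) = ( -0.311, 3.09 ), and x21 = -4.401. )
+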
nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) + { + /* + a a' + x e + b b' f h + y d + c c' + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::MAX, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the 
weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Softmax/Max sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + + void populateNetworkBackwardSoftmaxAndMax2( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 + x1 x12 x15 x17 + x5 x9 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7, x8, x9, x10 = Softmax( x3, x4, x5, x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14, x15, x16 = Softmax( x11, x12, x13 ) + + x17 = Max( x14, x15, x16 ) + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 5, NLR::Layer::MAX, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); 
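+ // ( Reference note: setWeight( sourceLayer, sourceNeuron, targetLayer, targetNeuron, weight ).
+ //   The two calls above set x0's coefficients from the comment, -1 into x3 and
+ //   +1 into x4; the calls below fill in the remaining coefficients the same way. )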
+ nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + + // Mark the Softmax/Max sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 3, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 3, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 2 ); + nlr.addActivationSource( 1, 0, 2, 3 ); + nlr.addActivationSource( 1, 1, 2, 3 ); + nlr.addActivationSource( 1, 2, 2, 3 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + nlr.addActivationSource( 3, 2, 4, 0 ); + nlr.addActivationSource( 3, 0, 4, 1 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 1 ); + nlr.addActivationSource( 3, 0, 4, 2 ); + nlr.addActivationSource( 3, 1, 4, 2 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 4, 0, 5, 0 ); + nlr.addActivationSource( 4, 1, 5, 0 ); + nlr.addActivationSource( 4, 2, 5, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 18 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 
10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + } + + void populateNetworkBackwardReluAndBilinear( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) + { + /* + a a' + x e + b b' f h + y d + c c' + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the ReLU/Bilinear sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardReluAndBilinear2( 
NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) + { + /* + x3 x7 + x0 + x4 x8 x11 x13 + x1 x15 + x5 x9 x12 x14 + x2 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = ReLU( x3 ) + x8 = ReLU( x4 ) + x9 = ReLU( x5 ) + x10 = ReLU( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + + x13 = ReLU( x11 ) + x14 = ReLU( x12 ) + + x15 = x13 * x14 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::RELU, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::BILINEAR, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setBias( 3, 0, 2 ); + + // Mark the ReLU/Bilinear sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + nlr.addActivationSource( 4, 0, 5, 0 ); + nlr.addActivationSource( 4, 1, 5, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 13 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 14 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 15 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 16 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 
11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + } + + void test_invprop_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List<Tightening> expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ), + Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ), + } ); + + List<Tightening> bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List<Tightening> expectedBounds2( { + Tightening( 8, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List<Tightening> expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), 
+ + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ), + + Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List<Tightening> expectedBounds4( { + Tightening( 8, 0, Tightening::LB ), + Tightening( 9, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_invprop_relu2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List<Tightening> expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ), + + Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ), + Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ), + + Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ), + + Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ), + + Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ), + } ); + + List<Tightening> bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + 
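+ // ( Observation: for the ReLU networks, the INVPROP pass below mainly recovers
+ //   zero lower bounds for ReLU outputs -- here x14, x15, x16 and x19 -- whose
+ //   expected DeepPoly lower bounds above are still negative. )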
+ // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List<Tightening> expectedBounds2( { + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + + Tightening( 19, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List<Tightening> expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ), + Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ), + Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ), + + Tightening( 17, -29.8351, Tightening::LB ), 
Tightening( 17, 28.2857, Tightening::UB ), + Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ), + Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ), + + Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List<Tightening> expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + + Tightening( 20, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + 
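+ /* ( Note on the shared structure of the test_invprop_* cases, as in the two
+      ReLU cases above: each case runs DeepPoly on an input box, checks the
+      resulting tightenings, feeds them back into the tableau via updateTableau(),
+      and then checks which additional tightenings the backward INVPROP pass
+      ( lpRelaxationPropagation() under the "backward-invprop" option ) discovers;
+      the input box is then widened and the whole sequence repeated. ) */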
+ void test_invprop_sigmoid() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSigmoid( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List<Tightening> expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0.2689, Tightening::LB ), Tightening( 4, 0.7311, Tightening::UB ), + Tightening( 5, 0.5, Tightening::LB ), Tightening( 5, 0.8808, Tightening::UB ), + + Tightening( 6, -0.1261, Tightening::LB ), Tightening( 6, 0.5069, Tightening::UB ), + Tightening( 7, -0.2379, Tightening::LB ), Tightening( 7, 0.8571, Tightening::UB ), + + Tightening( 8, 0.4685, Tightening::LB ), Tightening( 8, 0.6241, Tightening::UB ), + Tightening( 9, 0.4408, Tightening::LB ), Tightening( 9, 0.7021, Tightening::UB ), + + Tightening( 10, -1.1819, Tightening::LB ), Tightening( 10, -1.0535, Tightening::UB ), + Tightening( 11, 3.4986, Tightening::LB ), Tightening( 11, 3.8797, Tightening::UB ), + } ); + + List<Tightening> bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List<Tightening> expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List<Tightening> expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0.0066, Tightening::LB ), Tightening( 4, 0.8807, Tightening::UB ), + Tightening( 5, 0.0179, Tightening::LB ), Tightening( 5, 0.9526, Tightening::UB ), + + Tightening( 6, -0.8362, Tightening::LB ), Tightening( 6, 0.9193, Tightening::UB ), + Tightening( 7, -0.8860, Tightening::LB ), Tightening( 7, 1.6904, Tightening::UB ), + + Tightening( 8, 0.3023, Tightening::LB ), Tightening( 8, 0.7148, Tightening::UB ), + Tightening( 9, 0.2919, Tightening::LB ), Tightening( 9, 0.8443, Tightening::UB ), + + Tightening( 10, -1.2694, Tightening::LB ), Tightening( 10, -0.8841, Tightening::UB ), + Tightening( 11, 3.2396, Tightening::LB ), Tightening( 11, 4.05, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List<Tightening> expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_invprop_sigmoid2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSigmoid2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List<Tightening> expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), + Tightening( 8, 0.0474, Tightening::LB ), Tightening( 8, 0.9526, Tightening::UB ), + Tightening( 9, 0.0474, Tightening::LB ), Tightening( 9, 0.9526, Tightening::UB ), + Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9975, Tightening::UB ), + + Tightening( 11, 1.3787, Tightening::LB ), Tightening( 11, 4.6213, Tightening::UB ), + Tightening( 12, -0.5636, Tightening::LB ), Tightening( 12, 2.5636, Tightening::UB ), + Tightening( 13, 
-2.3771, Tightening::LB ), Tightening( 13, 0.3771, Tightening::UB ), + + Tightening( 14, 0.7988, Tightening::LB ), Tightening( 14, 0.9903, Tightening::UB ), + Tightening( 15, 0.3627, Tightening::LB ), Tightening( 15, 0.9285, Tightening::UB ), + Tightening( 16, 0.0849, Tightening::LB ), Tightening( 16, 0.5932, Tightening::UB ), + + Tightening( 17, -1.2113, Tightening::LB ), Tightening( 17, 0.0354, Tightening::UB ), + Tightening( 18, 1.4027, Tightening::LB ), Tightening( 18, 2.4177, Tightening::UB ), + + Tightening( 19, 0.2295, Tightening::LB ), Tightening( 19, 0.5088, Tightening::UB ), + Tightening( 20, 0.8026, Tightening::LB ), Tightening( 20, 0.9182, Tightening::UB ), + + Tightening( 21, -1.6539, Tightening::LB ), Tightening( 21, -1.3393, Tightening::UB ), + } ); + + List<Tightening> bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List<Tightening> expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List<Tightening> expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.9933, Tightening::UB ), + 
Tightening( 8, 0.0067, Tightening::LB ), Tightening( 8, 0.9933, Tightening::UB ), + Tightening( 9, 0.0025, Tightening::LB ), Tightening( 9, 0.9933, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9991, Tightening::UB ), + + Tightening( 11, 1.2517, Tightening::LB ), Tightening( 11, 4.9701, Tightening::UB ), + Tightening( 12, -0.9599, Tightening::LB ), Tightening( 12, 2.8466, Tightening::UB ), + Tightening( 13, -2.8147, Tightening::LB ), Tightening( 13, 0.9188, Tightening::UB ), + + Tightening( 14, 0.7776, Tightening::LB ), Tightening( 14, 0.9931, Tightening::UB ), + Tightening( 15, 0.2769, Tightening::LB ), Tightening( 15, 0.9451, Tightening::UB ), + Tightening( 16, 0.0565, Tightening::LB ), Tightening( 16, 0.7148, Tightening::UB ), + + Tightening( 17, -1.4307, Tightening::LB ), Tightening( 17, 0.1083, Tightening::UB ), + Tightening( 18, 1.2592, Tightening::LB ), Tightening( 18, 2.5519, Tightening::UB ), + + Tightening( 19, 0.1929, Tightening::LB ), Tightening( 19, 0.5270, Tightening::UB ), + Tightening( 20, 0.7789, Tightening::LB ), Tightening( 20, 0.9277, Tightening::UB ), + + Tightening( 21, -1.6967, Tightening::LB ), Tightening( 21, -1.3115, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List<Tightening> expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_invprop_abs() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardAbs( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List<Tightening> expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, -4, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 2, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + } ); + + List<Tightening> bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List<Tightening> expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( 
bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List<Tightening> expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 4, Tightening::UB ), + + Tightening( 6, -5, Tightening::LB ), Tightening( 6, 4, Tightening::UB ), + Tightening( 7, -4, Tightening::LB ), Tightening( 7, 10, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 10, Tightening::UB ), + + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 2, Tightening::LB ), Tightening( 11, 27, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List<Tightening> expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_invprop_abs2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardAbs2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List<Tightening> expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 
0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 9, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), Tightening( 14, 9, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), + Tightening( 16, 0, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), + + Tightening( 17, -15, Tightening::LB ), Tightening( 17, 12, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 27, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 15, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), + + Tightening( 21, -28, Tightening::LB ), Tightening( 21, 14, Tightening::UB ), + } ); + + List<Tightening> bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List<Tightening> expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List<Tightening> expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, 
-5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 15, Tightening::UB ), + + Tightening( 11, -13, Tightening::LB ), Tightening( 11, 18, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 25, Tightening::UB ), + Tightening( 13, -7, Tightening::LB ), Tightening( 13, 15, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), Tightening( 14, 18, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), Tightening( 15, 25, Tightening::UB ), + Tightening( 16, 0, Tightening::LB ), Tightening( 16, 15, Tightening::UB ), + + Tightening( 17, -33, Tightening::LB ), Tightening( 17, 25, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 58, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 58, Tightening::UB ), + + Tightening( 21, -59, Tightening::LB ), Tightening( 21, 32, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List<Tightening> expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_invprop_round() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardRound( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List<Tightening> expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -4, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 6.5, Tightening::UB ), + } ); + + List<Tightening> bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + 
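+ // ( Observation: unlike the Sigmoid and Abs cases above, which expect no
+ //   INVPROP tightenings, the Round tests expect bounds to snap to whole
+ //   numbers -- e.g. x11 goes from [ -4.5, 6.5 ] to [ -4, 6 ] below --
+ //   reflecting the integer-valued outputs of the Round layers. )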
+ // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List<Tightening> expectedBounds2( { + Tightening( 11, -4, Tightening::LB ), + Tightening( 11, 6, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List<Tightening> expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -3, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, -10.5, Tightening::LB ), Tightening( 7, 5.5, Tightening::UB ), + + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -10, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + + Tightening( 10, -3, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + Tightening( 11, -15.5, Tightening::LB ), Tightening( 11, 11.5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List<Tightening> expectedBounds4( { + Tightening( 7, -10, Tightening::LB ), + Tightening( 7, 5, Tightening::UB ), + + Tightening( 11, -14, Tightening::LB ), + Tightening( 11, 11, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_invprop_round2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardRound2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( 
nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 15, Tightening::UB ), + Tightening( 12, -10, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -7, Tightening::LB ), Tightening( 13, 3, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 15, Tightening::UB ), + Tightening( 15, -10, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -7, Tightening::LB ), Tightening( 16, 3, Tightening::UB ), + + Tightening( 17, -27.5, Tightening::LB ), Tightening( 17, 27.5, Tightening::UB ), + Tightening( 18, -16.5, Tightening::LB ), Tightening( 18, 16.5, Tightening::UB ), + + Tightening( 19, -28, Tightening::LB ), Tightening( 19, 28, Tightening::UB ), + Tightening( 20, -17, Tightening::LB ), Tightening( 20, 17, Tightening::UB ), + + Tightening( 21, -38, Tightening::LB ), Tightening( 21, 36, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + 
tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -13, Tightening::LB ), Tightening( 11, 30, Tightening::UB ), + Tightening( 12, -23, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), + + Tightening( 14, -13, Tightening::LB ), Tightening( 14, 30, Tightening::UB ), + Tightening( 15, -23, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), + Tightening( 16, -9, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), + + Tightening( 17, -57.5, Tightening::LB ), Tightening( 17, 32.5, Tightening::UB ), + Tightening( 18, -23.5, Tightening::LB ), Tightening( 18, 26.5, Tightening::UB ), + + Tightening( 19, -58, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), + Tightening( 20, -24, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), + + Tightening( 21, -74, Tightening::LB ), Tightening( 21, 44, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_invprop_sign() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB 
), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_invprop_sign2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + 
tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, 
-large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_invprop_leaky_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, 
Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 4, -0.1, Tightening::LB ), + + Tightening( 8, -0.045, Tightening::LB ), + Tightening( 9, -0.3, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), + Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + + Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), + Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + + Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), + Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 4, -0.5, Tightening::LB ), + Tightening( 5, -0.4, Tightening::LB ), + + Tightening( 8, -0.4571, Tightening::LB ), + Tightening( 9, -1.1057, 
Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_invprop_leaky_relu2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyRelu2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ), + Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ), + Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ), + Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ), + Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ), + + Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ), + Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ), + + Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ), + Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ), + + Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 7, -0.2, Tightening::LB ), + Tightening( 8, -0.3, Tightening::LB ), + Tightening( 9, -0.3, Tightening::LB ), + Tightening( 10, -0.6, Tightening::LB ), + + Tightening( 14, -0.9, Tightening::LB ), + Tightening( 15, -0.89, Tightening::LB ), + Tightening( 16, -0.77, Tightening::LB ), + + Tightening( 19, -2.3133, Tightening::LB ), + Tightening( 20, -1.2, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + 
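// Reset all hidden and output variables to effectively unbounded values, so that the next DeepPoly pass recomputes their bounds from the new input box alone. +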
double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ), + Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ), + Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ), + Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ), + Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ), + + Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ), + Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ), + + Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ), + Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ), + + Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + 
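// Note: assuming the 0.1 leaky slope used by this network, each tightened lower bound below is 0.1 times the corresponding pre-activation lower bound. +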
List<Tightening> expectedBounds4( { + Tightening( 7, -0.2, Tightening::LB ), + Tightening( 8, -0.5, Tightening::LB ), + Tightening( 9, -0.6, Tightening::LB ), + Tightening( 10, -1.5, Tightening::LB ), + + Tightening( 14, -1.1, Tightening::LB ), + Tightening( 15, -2.1771, Tightening::LB ), + Tightening( 16, -1.15, Tightening::LB ), + + Tightening( 19, -5.6259, Tightening::LB ), + Tightening( 20, -1.9, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_invprop_softmax_and_max() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSoftmaxAndMax( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List<Tightening> expectedBounds( { + Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 0.2119, Tightening::LB ), Tightening( 3, 0.8756, Tightening::UB ), + Tightening( 5, 0.0049, Tightening::LB ), Tightening( 5, 0.6652, Tightening::UB ), + Tightening( 7, 0.0634, Tightening::LB ), Tightening( 7, 0.4955, Tightening::UB ), + + Tightening( 8, -0.1870, Tightening::LB ), Tightening( 8, 1.4488, Tightening::UB ), + Tightening( 9, 0.6814, Tightening::LB ), Tightening( 9, 2.2432, Tightening::UB ), + + Tightening( 10, 0.6814, Tightening::LB ), Tightening( 10, 2.2432, Tightening::UB ), + + Tightening( 11, -2.2432, Tightening::LB ), Tightening( 11, -0.6814, Tightening::UB ), + } ); + + List<Tightening> bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List<Tightening> expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11,
large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + + Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 9, 0.2194, Tightening::LB ), Tightening( 9, 2.9934, Tightening::UB ), + + Tightening( 10, 0.2194, Tightening::LB ), Tightening( 10, 2.9934, Tightening::UB ), + + Tightening( 11, -2.9934, Tightening::LB ), Tightening( 11, -0.2194, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_invprop_softmax_and_max2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSoftmaxAndMax2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0.0003, Tightening::LB ), Tightening( 7, 0.9864, Tightening::UB ), + Tightening( 8, 0.0001, Tightening::LB ), Tightening( 8, 0.9907, Tightening::UB ), + Tightening( 9, 0.0001, Tightening::LB ), Tightening( 9, 0.9907, Tightening::UB ), + Tightening( 10, 0.0001, Tightening::LB ), Tightening( 10, 0.9994, Tightening::UB ), + + Tightening( 11, 1.0013, Tightening::LB ), Tightening( 11, 4.9634, Tightening::UB ), + Tightening( 12, -0.9861, Tightening::LB ), Tightening( 12, 2.9152, Tightening::UB ), + Tightening( 13, -2.9868, Tightening::LB ), Tightening( 13, 0.9678, Tightening::UB ), + + Tightening( 14, 0.1143, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ), + Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8694, Tightening::UB ), + Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4596, Tightening::UB ), + + Tightening( 17, 0.1143, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ), + 
} ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0.0001, Tightening::LB ), Tightening( 7, 0.9999, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 0.9991, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 0.9990, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9999, Tightening::UB ), + + Tightening( 11, 1.0003, Tightening::LB ), Tightening( 11, 4.9326, Tightening::UB ), + Tightening( 12, -0.9999, Tightening::LB ), Tightening( 12, 2.9979, Tightening::UB ), + Tightening( 13, -2.9990, Tightening::LB ), Tightening( 13, 0.9980, Tightening::UB ), + + Tightening( 14, 0.1067, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ), + Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8786, Tightening::UB ), + Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4677, Tightening::UB ), + + Tightening( 17, 0.1067, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + 
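// updateTableau() is a helper in this test suite that applies the returned tightenings to the mock tableau, so the backward pass below starts from the DeepPoly-tightened bounds. +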
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List<Tightening> expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_invprop_relu_and_bilinear() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReluAndBilinear( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List<Tightening> expectedBounds( { + Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ), + + Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ), + + Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ), + } ); + + List<Tightening> bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List<Tightening> expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List<Tightening> expectedBounds3( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ),
Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), + Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), + + Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), + + Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_invprop_relu_and_bilinear2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReluAndBilinear2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ), + Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ), + + Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + 
tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ), + Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ), + + Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_random_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-random" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + 
Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ), + Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 8, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ), + + Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + 
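// Under the backward-pmnr-random option, lpRelaxationPropagation() is expected to run the PMNR backward pass with randomly selected neurons (hence the name of this test). +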
TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 8, 0, Tightening::LB ), + Tightening( 9, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_random_relu2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-random" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ), + + Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ), + Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ), + + Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ), + + Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ), + + Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + + Tightening( 19, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + 
tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ), + Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ), + Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ), + + Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ), + Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ), + Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ), + + Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + + Tightening( 14, 0, Tightening::LB ), + 
Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + + Tightening( 20, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_random_sigmoid() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-random" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSigmoid( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0.2689, Tightening::LB ), Tightening( 4, 0.7311, Tightening::UB ), + Tightening( 5, 0.5, Tightening::LB ), Tightening( 5, 0.8808, Tightening::UB ), + + Tightening( 6, -0.1261, Tightening::LB ), Tightening( 6, 0.5069, Tightening::UB ), + Tightening( 7, -0.2379, Tightening::LB ), Tightening( 7, 0.8571, Tightening::UB ), + + Tightening( 8, 0.4685, Tightening::LB ), Tightening( 8, 0.6241, Tightening::UB ), + Tightening( 9, 0.4408, Tightening::LB ), Tightening( 9, 0.7021, Tightening::UB ), + + Tightening( 10, -1.1819, Tightening::LB ), Tightening( 10, -1.0535, Tightening::UB ), + Tightening( 11, 3.4986, Tightening::LB ), Tightening( 11, 3.8797, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), 
+ + Tightening( 4, 0.0066, Tightening::LB ), Tightening( 4, 0.8807, Tightening::UB ), + Tightening( 5, 0.0179, Tightening::LB ), Tightening( 5, 0.9526, Tightening::UB ), + + Tightening( 6, -0.8362, Tightening::LB ), Tightening( 6, 0.9193, Tightening::UB ), + Tightening( 7, -0.8860, Tightening::LB ), Tightening( 7, 1.6904, Tightening::UB ), + + Tightening( 8, 0.3023, Tightening::LB ), Tightening( 8, 0.7148, Tightening::UB ), + Tightening( 9, 0.2919, Tightening::LB ), Tightening( 9, 0.8443, Tightening::UB ), + + Tightening( 10, -1.2694, Tightening::LB ), Tightening( 10, -0.8841, Tightening::UB ), + Tightening( 11, 3.2396, Tightening::LB ), Tightening( 11, 4.05, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_random_sigmoid2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-random" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSigmoid2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), + Tightening( 8, 0.0474, Tightening::LB ), Tightening( 8, 0.9526, Tightening::UB ), + Tightening( 9, 0.0474, Tightening::LB ), Tightening( 9, 0.9526, Tightening::UB ), + Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9975, Tightening::UB ), + + Tightening( 11, 1.3787, Tightening::LB ), Tightening( 11, 4.6213, Tightening::UB ), + Tightening( 12, -0.5636, Tightening::LB ), Tightening( 12, 2.5636, Tightening::UB ), + Tightening( 13, -2.3771, Tightening::LB ), Tightening( 13, 0.3771, Tightening::UB ), + + Tightening( 14, 0.7988, Tightening::LB ), Tightening( 14, 0.9903, Tightening::UB ), + Tightening( 15, 0.3627, Tightening::LB ), Tightening( 15, 0.9285, Tightening::UB ), + Tightening( 16, 0.0849, Tightening::LB ), Tightening( 16, 0.5932, Tightening::UB ), + + Tightening( 17, -1.2113, Tightening::LB ), Tightening( 17, 0.0354, Tightening::UB ), + Tightening( 18, 1.4027, Tightening::LB ), Tightening( 18, 2.4177, Tightening::UB ), + + Tightening( 19, 0.2295, Tightening::LB ), Tightening( 19, 0.5088, Tightening::UB ), + Tightening( 20, 0.8026, Tightening::LB ), Tightening( 20, 0.9182, Tightening::UB ), + + Tightening( 21, -1.6539, Tightening::LB ), Tightening( 21, -1.3393, Tightening::UB ), + } ); + 
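+        // Note on the sigmoid entries above: the logistic function is
+        // monotonic, so DeepPoly's concrete bounds for a sigmoid neuron are
+        // simply sigma() of its source's concrete bounds. E.g. neuron 7 has
+        // source neuron 3 in [-2, 2], giving [sigma(-2), sigma(2)] =
+        // [0.1192, 0.8808], and neuron 10 has source neuron 6 in [-6, 6],
+        // giving [0.0025, 0.9975]. Illustrative sanity check (assumes <cmath>
+        // is reachable through the existing includes):
+        TS_ASSERT( FloatUtils::areEqual( 1 / ( 1 + std::exp( 2.0 ) ), 0.1192, 0.0001 ) );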
+ List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.9933, Tightening::UB ), + Tightening( 8, 0.0067, Tightening::LB ), Tightening( 8, 0.9933, Tightening::UB ), + Tightening( 9, 0.0025, Tightening::LB ), Tightening( 9, 0.9933, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9991, Tightening::UB ), + + Tightening( 11, 1.2517, Tightening::LB ), Tightening( 11, 4.9701, Tightening::UB ), + Tightening( 12, -0.9599, Tightening::LB ), Tightening( 12, 2.8466, Tightening::UB ), + Tightening( 13, -2.8147, Tightening::LB ), Tightening( 13, 0.9188, Tightening::UB ), + + Tightening( 14, 0.7776, Tightening::LB ), Tightening( 14, 0.9931, Tightening::UB ), + Tightening( 15, 0.2769, Tightening::LB ), Tightening( 15, 0.9451, Tightening::UB ), + Tightening( 16, 0.0565, Tightening::LB ), Tightening( 16, 0.7148, 
Tightening::UB ), + + Tightening( 17, -1.4307, Tightening::LB ), Tightening( 17, 0.1083, Tightening::UB ), + Tightening( 18, 1.2592, Tightening::LB ), Tightening( 18, 2.5519, Tightening::UB ), + + Tightening( 19, 0.1929, Tightening::LB ), Tightening( 19, 0.5270, Tightening::UB ), + Tightening( 20, 0.7789, Tightening::LB ), Tightening( 20, 0.9277, Tightening::UB ), + + Tightening( 21, -1.6967, Tightening::LB ), Tightening( 21, -1.3115, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_random_abs() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-random" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardAbs( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, -4, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 2, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + 
tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 4, Tightening::UB ), + + Tightening( 6, -5, Tightening::LB ), Tightening( 6, 4, Tightening::UB ), + Tightening( 7, -4, Tightening::LB ), Tightening( 7, 10, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 10, Tightening::UB ), + + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 2, Tightening::LB ), Tightening( 11, 27, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_random_abs2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-random" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardAbs2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 9, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), Tightening( 14, 9, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), + Tightening( 16, 0, Tightening::LB ), 
Tightening( 16, 6, Tightening::UB ), + + Tightening( 17, -15, Tightening::LB ), Tightening( 17, 12, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 27, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 15, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), + + Tightening( 21, -28, Tightening::LB ), Tightening( 21, 14, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 15, Tightening::UB ), + + Tightening( 11, -13, Tightening::LB ), Tightening( 11, 18, Tightening::UB ), + Tightening( 12, 
-5, Tightening::LB ), Tightening( 12, 25, Tightening::UB ), + Tightening( 13, -7, Tightening::LB ), Tightening( 13, 15, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), Tightening( 14, 18, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), Tightening( 15, 25, Tightening::UB ), + Tightening( 16, 0, Tightening::LB ), Tightening( 16, 15, Tightening::UB ), + + Tightening( 17, -33, Tightening::LB ), Tightening( 17, 25, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 58, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 58, Tightening::UB ), + + Tightening( 21, -59, Tightening::LB ), Tightening( 21, 32, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_random_round() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-random" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardRound( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -4, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 6.5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 11, -4, Tightening::LB ), + Tightening( 11, 6, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + 
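+        // Widen every non-input variable back to an effectively unbounded
+        // range before re-running the analysis, so that tightenings from the
+        // previous propagation cannot mask the effect of the new input box;
+        // 10^6 serves as a stand-in for infinity throughout these tests.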
tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -3, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, -10.5, Tightening::LB ), Tightening( 7, 5.5, Tightening::UB ), + + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -10, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + + Tightening( 10, -3, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + Tightening( 11, -15.5, Tightening::LB ), Tightening( 11, 11.5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, -10, Tightening::LB ), + Tightening( 7, 5, Tightening::UB ), + + Tightening( 11, -14, Tightening::LB ), + Tightening( 11, 11, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_random_round2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-random" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardRound2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, 
Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 15, Tightening::UB ), + Tightening( 12, -10, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -7, Tightening::LB ), Tightening( 13, 3, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 15, Tightening::UB ), + Tightening( 15, -10, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -7, Tightening::LB ), Tightening( 16, 3, Tightening::UB ), + + Tightening( 17, -27.5, Tightening::LB ), Tightening( 17, 27.5, Tightening::UB ), + Tightening( 18, -16.5, Tightening::LB ), Tightening( 18, 16.5, Tightening::UB ), + + Tightening( 19, -28, Tightening::LB ), Tightening( 19, 28, Tightening::UB ), + Tightening( 20, -17, Tightening::LB ), Tightening( 20, 17, Tightening::UB ), + + Tightening( 21, -38, Tightening::LB ), Tightening( 21, 36, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + 
Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -13, Tightening::LB ), Tightening( 11, 30, Tightening::UB ), + Tightening( 12, -23, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), + + Tightening( 14, -13, Tightening::LB ), Tightening( 14, 30, Tightening::UB ), + Tightening( 15, -23, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), + Tightening( 16, -9, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), + + Tightening( 17, -57.5, Tightening::LB ), Tightening( 17, 32.5, Tightening::UB ), + Tightening( 18, -23.5, Tightening::LB ), Tightening( 18, 26.5, Tightening::UB ), + + Tightening( 19, -58, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), + Tightening( 20, -24, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), + + Tightening( 21, -74, Tightening::LB ), Tightening( 21, 44, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_random_sign() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-random" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with random neuron selection + 
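+        // (With MILP_SOLVER_BOUND_TIGHTENING_TYPE set to "backward-pmnr-random"
+        // above, lpRelaxationPropagation() presumably solves LP relaxations
+        // around a randomly chosen subset of neurons. Sign outputs are already
+        // pinned to the [-1, 1] box by DeepPoly, so no further tightenings are
+        // expected here.)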
TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_random_sign2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-random" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + 
Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, 
large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_random_leaky_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-random" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, 
Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 4, -0.1, Tightening::LB ), + + Tightening( 8, -0.045, Tightening::LB ), + Tightening( 9, -0.3, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), + Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + + Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), + Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + + Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), + Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 4, -0.5, Tightening::LB ), + Tightening( 5, -0.4, Tightening::LB ), + + Tightening( 8, -0.4571, Tightening::LB ), + Tightening( 9, -1.1057, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_random_leaky_relu2() + { + Options::get()->setString( 
Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+                                   "backward-pmnr-random" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkBackwardLeakyReLU2( nlr, tableau );
+
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 1 );
+        tableau.setLowerBound( 2, -1 );
+        tableau.setUpperBound( 2, 1 );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds( {
+            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
+            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
+            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
+
+            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
+            Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
+            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
+            Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
+
+            Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ),
+            Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ),
+            Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ),
+
+            Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ),
+            Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ),
+            Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ),
+
+            Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ),
+            Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ),
+
+            Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ),
+            Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ),
+
+            Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+
+        // Invoke PMNR with random neuron selection
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds2( {
+            Tightening( 7, -0.2, Tightening::LB ),
+            Tightening( 8, -0.3, Tightening::LB ),
+            Tightening( 9, -0.3, Tightening::LB ),
+            Tightening( 10, -0.6, Tightening::LB ),
+
+            Tightening( 14, -0.9, Tightening::LB ),
+            Tightening( 15, -0.89, Tightening::LB ),
+            Tightening( 16, -0.77, Tightening::LB ),
+
+            Tightening( 19, -2.3133, Tightening::LB ),
+            Tightening( 20, -1.2, Tightening::LB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+
+        // Change the current bounds
+        tableau.setLowerBound( 0, -3 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 2 );
+        tableau.setLowerBound( 2, -2 );
+        tableau.setUpperBound( 2, 2 );
+
+        double large = 1000000;
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ), + Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ), + Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ), + Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ), + Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ), + + Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ), + Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ), + + Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ), + Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ), + + Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, -0.2, Tightening::LB ), + Tightening( 8, -0.5, Tightening::LB ), + Tightening( 9, -0.6, Tightening::LB ), + 
+            Tightening( 10, -1.5, Tightening::LB ),
+
+            Tightening( 14, -1.1, Tightening::LB ),
+            Tightening( 15, -2.1771, Tightening::LB ),
+            Tightening( 16, -1.15, Tightening::LB ),
+
+            Tightening( 19, -5.6259, Tightening::LB ),
+            Tightening( 20, -1.9, Tightening::LB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+    }
+
+    void test_pmnr_random_softmax_and_max()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+                                   "backward-pmnr-random" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkBackwardSoftmaxAndMax( nlr, tableau );
+
+        tableau.setLowerBound( 0, 0 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, 0 );
+        tableau.setUpperBound( 1, 1 );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
+            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
+
+            Tightening( 3, 0.2119, Tightening::LB ), Tightening( 3, 0.8756, Tightening::UB ),
+            Tightening( 5, 0.0049, Tightening::LB ), Tightening( 5, 0.6652, Tightening::UB ),
+            Tightening( 7, 0.0634, Tightening::LB ), Tightening( 7, 0.4955, Tightening::UB ),
+
+            Tightening( 8, -0.1870, Tightening::LB ), Tightening( 8, 1.4488, Tightening::UB ),
+            Tightening( 9, 0.6814, Tightening::LB ), Tightening( 9, 2.2432, Tightening::UB ),
+
+            Tightening( 10, 0.6814, Tightening::LB ), Tightening( 10, 2.2432, Tightening::UB ),
+
+            Tightening( 11, -2.2432, Tightening::LB ), Tightening( 11, -0.6814, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+
+        // Invoke PMNR with random neuron selection
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds2( {} );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+
+        // Change the current bounds
+        tableau.setLowerBound( 0, -3 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 2 );
+
+        double large = 1000000;
+        tableau.setLowerBound( 2, -large );
+        tableau.setUpperBound( 2, large );
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+        tableau.setLowerBound( 7, -large );
+        tableau.setUpperBound( 7, large );
+        tableau.setLowerBound( 8, -large );
+        tableau.setUpperBound( 8, large );
+        tableau.setLowerBound( 9, -large );
+        tableau.setUpperBound( 9, large );
+        tableau.setLowerBound( 10, -large );
+        tableau.setUpperBound( 10, large );
+        tableau.setLowerBound( 11, -large );
+        tableau.setUpperBound( 11, large );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
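+        // obtainCurrentBounds() reads the variable bounds just written to the
+        // mock tableau back into the reasoner's layers, so the propagation
+        // below starts from the widened input box rather than the previous one.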
TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + + Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 9, 0.2194, Tightening::LB ), Tightening( 9, 2.9934, Tightening::UB ), + + Tightening( 10, 0.2194, Tightening::LB ), Tightening( 10, 2.9934, Tightening::UB ), + + Tightening( 11, -2.9934, Tightening::LB ), Tightening( 11, -0.2194, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_random_softmax_and_max2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-random" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSoftmaxAndMax2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0.0003, Tightening::LB ), Tightening( 7, 0.9864, Tightening::UB ), + Tightening( 8, 0.0001, Tightening::LB ), Tightening( 8, 0.9907, Tightening::UB ), + Tightening( 9, 0.0001, Tightening::LB ), Tightening( 9, 0.9907, Tightening::UB ), + Tightening( 10, 0.0001, Tightening::LB ), Tightening( 10, 0.9994, Tightening::UB ), + + Tightening( 11, 1.0013, Tightening::LB ), Tightening( 11, 4.9634, Tightening::UB ), + Tightening( 12, -0.9861, Tightening::LB ), Tightening( 12, 2.9152, Tightening::UB ), + Tightening( 13, -2.9868, Tightening::LB ), Tightening( 13, 0.9678, Tightening::UB ), + + Tightening( 14, 0.1143, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ), + Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8694, Tightening::UB ), + Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4596, Tightening::UB ), + + Tightening( 17, 0.1143, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( 
nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0.0001, Tightening::LB ), Tightening( 7, 0.9999, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 0.9991, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 0.9990, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9999, Tightening::UB ), + + Tightening( 11, 1.0003, Tightening::LB ), Tightening( 11, 4.9326, Tightening::UB ), + Tightening( 12, -0.9999, Tightening::LB ), Tightening( 12, 2.9979, Tightening::UB ), + Tightening( 13, -2.9990, Tightening::LB ), Tightening( 13, 0.9980, Tightening::UB ), + + Tightening( 14, 0.1067, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ), + Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8786, Tightening::UB ), + Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4677, Tightening::UB ), + + Tightening( 17, 0.1067, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + 
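+ // updateTableau() is a helper defined elsewhere in this suite; as used here,
+ // it writes the tightenings returned by the previous pass back into the mock
+ // tableau, so that the PMNR pass below starts from the tightened bounds.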
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+ List<Tightening> expectedBounds4( {} );
+
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+ }
+
+ void test_pmnr_random_relu_and_bilinear()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+ Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+ "backward-pmnr-random" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkBackwardReluAndBilinear( nlr, tableau );
+
+ tableau.setLowerBound( 0, 0 );
+ tableau.setUpperBound( 0, 1 );
+ tableau.setLowerBound( 1, 0 );
+ tableau.setUpperBound( 1, 1 );
+
+
+ // Invoke DeepPoly
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+ List<Tightening> expectedBounds( {
+ Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+ Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
+ Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
+
+ Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+ Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+ Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
+
+ Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ),
+ Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ),
+
+ Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ),
+
+ Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ),
+ } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+
+ // Invoke PMNR with random neuron selection
+ TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+ List<Tightening> expectedBounds2( {} );
+
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+
+ // Change the current bounds
+ tableau.setLowerBound( 0, -3 );
+ tableau.setUpperBound( 0, 1 );
+ tableau.setLowerBound( 1, -1 );
+ tableau.setUpperBound( 1, 2 );
+
+ double large = 1000000;
+ tableau.setLowerBound( 2, -large );
+ tableau.setUpperBound( 2, large );
+ tableau.setLowerBound( 3, -large );
+ tableau.setUpperBound( 3, large );
+ tableau.setLowerBound( 4, -large );
+ tableau.setUpperBound( 4, large );
+ tableau.setLowerBound( 5, -large );
+ tableau.setUpperBound( 5, large );
+ tableau.setLowerBound( 6, -large );
+ tableau.setUpperBound( 6, large );
+ tableau.setLowerBound( 7, -large );
+ tableau.setUpperBound( 7, large );
+ tableau.setLowerBound( 8, -large );
+ tableau.setUpperBound( 8, large );
+ tableau.setLowerBound( 9, -large );
+ tableau.setUpperBound( 9, large );
+ tableau.setLowerBound( 10, -large );
+ tableau.setUpperBound( 10, large );
+ tableau.setLowerBound( 11, -large );
+ tableau.setUpperBound( 11, large );
+
+
+ // Invoke DeepPoly
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+ List<Tightening> expectedBounds3( {
+ Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+ Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
+ Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
+
Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), + Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), + + Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), + + Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_random_relu_and_bilinear2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-random" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReluAndBilinear2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ), + Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ), + + Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + 
tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ), + Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ), + + Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + + void test_pmnr_gradient_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke DeepPoly + 
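+ // (baseline pass: DeepPoly bounds are computed first, so that the
+ // gradient-guided PMNR pass further below has reference bounds to tighten)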
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ), + Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 8, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ), + + Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with gradient 
heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 8, 0, Tightening::LB ), + Tightening( 9, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_gradient_relu2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ), + + Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ), + Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ), + + Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ), + + Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ), + + Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + + Tightening( 19, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + 
tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ), + Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ), + Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ), + + Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ), + Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ), + Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ), + + Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( 
nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + + Tightening( 20, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_gradient_sigmoid() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSigmoid( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0.2689, Tightening::LB ), Tightening( 4, 0.7311, Tightening::UB ), + Tightening( 5, 0.5, Tightening::LB ), Tightening( 5, 0.8808, Tightening::UB ), + + Tightening( 6, -0.1261, Tightening::LB ), Tightening( 6, 0.5069, Tightening::UB ), + Tightening( 7, -0.2379, Tightening::LB ), Tightening( 7, 0.8571, Tightening::UB ), + + Tightening( 8, 0.4685, Tightening::LB ), Tightening( 8, 0.6241, Tightening::UB ), + Tightening( 9, 0.4408, Tightening::LB ), Tightening( 9, 0.7021, Tightening::UB ), + + Tightening( 10, -1.1819, Tightening::LB ), Tightening( 10, -1.0535, Tightening::UB ), + Tightening( 11, 3.4986, Tightening::LB ), Tightening( 11, 3.8797, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + 
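+ // (second DeepPoly pass: the input box was widened and every derived bound
+ // was reset to +/- large above, so all tightenings checked below come from
+ // this pass alone)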
TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0.0066, Tightening::LB ), Tightening( 4, 0.8807, Tightening::UB ), + Tightening( 5, 0.0179, Tightening::LB ), Tightening( 5, 0.9526, Tightening::UB ), + + Tightening( 6, -0.8362, Tightening::LB ), Tightening( 6, 0.9193, Tightening::UB ), + Tightening( 7, -0.8860, Tightening::LB ), Tightening( 7, 1.6904, Tightening::UB ), + + Tightening( 8, 0.3023, Tightening::LB ), Tightening( 8, 0.7148, Tightening::UB ), + Tightening( 9, 0.2919, Tightening::LB ), Tightening( 9, 0.8443, Tightening::UB ), + + Tightening( 10, -1.2694, Tightening::LB ), Tightening( 10, -0.8841, Tightening::UB ), + Tightening( 11, 3.2396, Tightening::LB ), Tightening( 11, 4.05, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_gradient_sigmoid2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSigmoid2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), + Tightening( 8, 0.0474, Tightening::LB ), Tightening( 8, 0.9526, Tightening::UB ), + Tightening( 9, 0.0474, Tightening::LB ), Tightening( 9, 0.9526, Tightening::UB ), + Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9975, Tightening::UB ), + + Tightening( 11, 1.3787, Tightening::LB ), Tightening( 11, 4.6213, Tightening::UB ), + Tightening( 12, -0.5636, Tightening::LB ), Tightening( 12, 2.5636, Tightening::UB ), + Tightening( 13, -2.3771, Tightening::LB ), Tightening( 13, 0.3771, Tightening::UB ), + + Tightening( 14, 0.7988, Tightening::LB ), Tightening( 14, 0.9903, Tightening::UB ), + Tightening( 15, 0.3627, Tightening::LB ), Tightening( 15, 0.9285, Tightening::UB ), + Tightening( 16, 0.0849, Tightening::LB ), Tightening( 16, 0.5932, Tightening::UB ), + + Tightening( 17, -1.2113, Tightening::LB ), Tightening( 17, 0.0354, Tightening::UB ), + Tightening( 18, 1.4027, Tightening::LB ), Tightening( 18, 2.4177, Tightening::UB ), + + Tightening( 19, 
0.2295, Tightening::LB ), Tightening( 19, 0.5088, Tightening::UB ), + Tightening( 20, 0.8026, Tightening::LB ), Tightening( 20, 0.9182, Tightening::UB ), + + Tightening( 21, -1.6539, Tightening::LB ), Tightening( 21, -1.3393, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.9933, Tightening::UB ), + Tightening( 8, 0.0067, Tightening::LB ), Tightening( 8, 0.9933, Tightening::UB ), + Tightening( 9, 0.0025, Tightening::LB ), Tightening( 9, 0.9933, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9991, Tightening::UB ), + + Tightening( 11, 1.2517, Tightening::LB ), Tightening( 11, 4.9701, Tightening::UB ), + Tightening( 12, -0.9599, Tightening::LB ), Tightening( 12, 2.8466, Tightening::UB ), + Tightening( 13, -2.8147, Tightening::LB ), Tightening( 13, 
0.9188, Tightening::UB ), + + Tightening( 14, 0.7776, Tightening::LB ), Tightening( 14, 0.9931, Tightening::UB ), + Tightening( 15, 0.2769, Tightening::LB ), Tightening( 15, 0.9451, Tightening::UB ), + Tightening( 16, 0.0565, Tightening::LB ), Tightening( 16, 0.7148, Tightening::UB ), + + Tightening( 17, -1.4307, Tightening::LB ), Tightening( 17, 0.1083, Tightening::UB ), + Tightening( 18, 1.2592, Tightening::LB ), Tightening( 18, 2.5519, Tightening::UB ), + + Tightening( 19, 0.1929, Tightening::LB ), Tightening( 19, 0.5270, Tightening::UB ), + Tightening( 20, 0.7789, Tightening::LB ), Tightening( 20, 0.9277, Tightening::UB ), + + Tightening( 21, -1.6967, Tightening::LB ), Tightening( 21, -1.3115, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_gradient_abs() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardAbs( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, -4, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 2, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + 
tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 4, Tightening::UB ), + + Tightening( 6, -5, Tightening::LB ), Tightening( 6, 4, Tightening::UB ), + Tightening( 7, -4, Tightening::LB ), Tightening( 7, 10, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 10, Tightening::UB ), + + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 2, Tightening::LB ), Tightening( 11, 27, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_gradient_abs2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardAbs2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 9, Tightening::UB ), + Tightening( 12, -2, Tightening::LB 
), Tightening( 12, 12, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), Tightening( 14, 9, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), + Tightening( 16, 0, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), + + Tightening( 17, -15, Tightening::LB ), Tightening( 17, 12, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 27, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 15, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), + + Tightening( 21, -28, Tightening::LB ), Tightening( 21, 14, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 5, Tightening::UB 
), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 15, Tightening::UB ), + + Tightening( 11, -13, Tightening::LB ), Tightening( 11, 18, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 25, Tightening::UB ), + Tightening( 13, -7, Tightening::LB ), Tightening( 13, 15, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), Tightening( 14, 18, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), Tightening( 15, 25, Tightening::UB ), + Tightening( 16, 0, Tightening::LB ), Tightening( 16, 15, Tightening::UB ), + + Tightening( 17, -33, Tightening::LB ), Tightening( 17, 25, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 58, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 58, Tightening::UB ), + + Tightening( 21, -59, Tightening::LB ), Tightening( 21, 32, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_gradient_round() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardRound( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -4, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 6.5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 11, -4, Tightening::LB ), + Tightening( 11, 6, 
Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -3, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, -10.5, Tightening::LB ), Tightening( 7, 5.5, Tightening::UB ), + + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -10, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + + Tightening( 10, -3, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + Tightening( 11, -15.5, Tightening::LB ), Tightening( 11, 11.5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, -10, Tightening::LB ), + Tightening( 7, 5, Tightening::UB ), + + Tightening( 11, -14, Tightening::LB ), + Tightening( 11, 11, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_gradient_round2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardRound2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB 
), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 15, Tightening::UB ), + Tightening( 12, -10, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -7, Tightening::LB ), Tightening( 13, 3, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 15, Tightening::UB ), + Tightening( 15, -10, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -7, Tightening::LB ), Tightening( 16, 3, Tightening::UB ), + + Tightening( 17, -27.5, Tightening::LB ), Tightening( 17, 27.5, Tightening::UB ), + Tightening( 18, -16.5, Tightening::LB ), Tightening( 18, 16.5, Tightening::UB ), + + Tightening( 19, -28, Tightening::LB ), Tightening( 19, 28, Tightening::UB ), + Tightening( 20, -17, Tightening::LB ), Tightening( 20, 17, Tightening::UB ), + + Tightening( 21, -38, Tightening::LB ), Tightening( 21, 36, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 
20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -13, Tightening::LB ), Tightening( 11, 30, Tightening::UB ), + Tightening( 12, -23, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), + + Tightening( 14, -13, Tightening::LB ), Tightening( 14, 30, Tightening::UB ), + Tightening( 15, -23, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), + Tightening( 16, -9, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), + + Tightening( 17, -57.5, Tightening::LB ), Tightening( 17, 32.5, Tightening::UB ), + Tightening( 18, -23.5, Tightening::LB ), Tightening( 18, 26.5, Tightening::UB ), + + Tightening( 19, -58, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), + Tightening( 20, -24, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), + + Tightening( 21, -74, Tightening::LB ), Tightening( 21, 44, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_gradient_sign() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), 
Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_gradient_sign2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign2( nlr, tableau ); + + 
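// As in the previous sign test: DeepPoly runs first, its tightenings are
+ // written back through updateTableau, and the PMNR pass is then expected to
+ // find no further tightenings for sign activations (the expected lists below
+ // are empty). updateTableau is presumably a helper along these lines
+ // (a minimal sketch, assuming Tightening's _variable/_value/_type fields;
+ // not necessarily the file's actual definition):
+ //
+ //   void updateTableau( MockTableau &tableau, List<Tightening> &bounds )
+ //   {
+ //       for ( const Tightening &t : bounds )
+ //       {
+ //           if ( t._type == Tightening::LB )
+ //               tableau.setLowerBound( t._variable, t._value );
+ //           else
+ //               tableau.setUpperBound( t._variable, t._value );
+ //       }
+ //   }
+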
tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); 
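+ // The reset continues below for variables 15 through 21; a compact
+ // equivalent (sketch only) of this whole block would be:
+ //   for ( unsigned i = 3; i <= 21; ++i )
+ //   {
+ //       tableau.setLowerBound( i, -large );
+ //       tableau.setUpperBound( i, large );
+ //   }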
+ tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_gradient_leaky_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, 
Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 4, -0.1, Tightening::LB ), + + Tightening( 8, -0.045, Tightening::LB ), + Tightening( 9, -0.3, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), + Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + + Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), + Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + + Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), + Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + 
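// With MILP_SOLVER_BOUND_TIGHTENING_TYPE set to "backward-pmnr-gradient",
+ // the lpRelaxationPropagation() call below is expected to run the PMNR LP
+ // relaxation, using the gradient heuristic to select unstable neurons. +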
TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 4, -0.5, Tightening::LB ), + Tightening( 5, -0.4, Tightening::LB ), + + Tightening( 8, -0.4571, Tightening::LB ), + Tightening( 9, -1.1057, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_gradient_leaky_relu2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyRelu2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ), + Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ), + Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ), + Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ), + Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ), + + Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ), + Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ), + + Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ), + Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ), + + Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 7, -0.2, Tightening::LB ), + Tightening( 8, -0.3, Tightening::LB ), + Tightening( 9, -0.3, Tightening::LB ), + Tightening( 10, -0.6, Tightening::LB ), + + Tightening( 14, -0.9, Tightening::LB ), + Tightening( 15, -0.89, Tightening::LB ), + Tightening( 16, -0.77, Tightening::LB ), + + Tightening( 19, -2.3133, Tightening::LB ), + Tightening( 20, -1.2, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); 
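+ // boundsEqual presumably checks that the collected tightenings match the
+ // expected list irrespective of order; a minimal sketch of such a helper
+ // (hypothetical body, not necessarily the actual definition used here):
+ //
+ //   bool boundsEqual( const List<Tightening> &bounds,
+ //                     const List<Tightening> &expectedBounds )
+ //   {
+ //       if ( bounds.size() != expectedBounds.size() )
+ //           return false;
+ //       for ( const Tightening &t : expectedBounds )
+ //           if ( !bounds.exists( t ) )
+ //               return false;
+ //       return true;
+ //   }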
+ TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ), + Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ), + Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ), + Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ), + Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ), + + Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ), + Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ), + + Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ), + Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ), + + Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( 
nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
+
+
+ // Invoke PMNR with gradient heuristic for neuron selection
+ TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+ List<Tightening> expectedBounds4( {
+ Tightening( 7, -0.2, Tightening::LB ),
+ Tightening( 8, -0.5, Tightening::LB ),
+ Tightening( 9, -0.6, Tightening::LB ),
+ Tightening( 10, -1.5, Tightening::LB ),
+
+ Tightening( 14, -1.1, Tightening::LB ),
+ Tightening( 15, -2.1771, Tightening::LB ),
+ Tightening( 16, -1.15, Tightening::LB ),
+
+ Tightening( 19, -5.6259, Tightening::LB ),
+ Tightening( 20, -1.9, Tightening::LB ),
+ } );
+
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+ }
+
+ void test_pmnr_gradient_softmax_and_max()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+ Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" );
+ Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+ "backward-pmnr-gradient" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkBackwardSoftmaxAndMax( nlr, tableau );
+
+ tableau.setLowerBound( 0, 0 );
+ tableau.setUpperBound( 0, 1 );
+ tableau.setLowerBound( 1, 0 );
+ tableau.setUpperBound( 1, 1 );
+
+
+ // Invoke DeepPoly
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+ List<Tightening> expectedBounds( {
+ Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+ Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
+ Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
+
+ Tightening( 3, 0.2119, Tightening::LB ), Tightening( 3, 0.8756, Tightening::UB ),
+ Tightening( 5, 0.0049, Tightening::LB ), Tightening( 5, 0.6652, Tightening::UB ),
+ Tightening( 7, 0.0634, Tightening::LB ), Tightening( 7, 0.4955, Tightening::UB ),
+
+ Tightening( 8, -0.1870, Tightening::LB ), Tightening( 8, 1.4488, Tightening::UB ),
+ Tightening( 9, 0.6814, Tightening::LB ), Tightening( 9, 2.2432, Tightening::UB ),
+
+ Tightening( 10, 0.6814, Tightening::LB ), Tightening( 10, 2.2432, Tightening::UB ),
+
+ Tightening( 11, -2.2432, Tightening::LB ), Tightening( 11, -0.6814, Tightening::UB ),
+ } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+
+ // Invoke PMNR with gradient heuristic for neuron selection
+ TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+ List<Tightening> expectedBounds2( {} );
+
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+
+ // Change the current bounds
+ tableau.setLowerBound( 0, -3 );
+ tableau.setUpperBound( 0, 1 );
+ tableau.setLowerBound( 1, -1 );
+ tableau.setUpperBound( 1, 2 );
+
+ double large = 1000000;
+ tableau.setLowerBound( 2, -large );
+ tableau.setUpperBound( 2, large );
+ tableau.setLowerBound( 3, -large );
+ tableau.setUpperBound( 3, large );
+ tableau.setLowerBound( 4, -large );
+ tableau.setUpperBound( 4, large );
+ tableau.setLowerBound( 5, -large );
+ tableau.setUpperBound( 5, large );
+ tableau.setLowerBound( 6, -large );
+
tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + + Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 9, 0.2194, Tightening::LB ), Tightening( 9, 2.9934, Tightening::UB ), + + Tightening( 10, 0.2194, Tightening::LB ), Tightening( 10, 2.9934, Tightening::UB ), + + Tightening( 11, -2.9934, Tightening::LB ), Tightening( 11, -0.2194, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_gradient_softmax_and_max2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSoftmaxAndMax2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0.0003, Tightening::LB ), Tightening( 7, 0.9864, Tightening::UB ), + Tightening( 8, 0.0001, Tightening::LB ), Tightening( 8, 0.9907, Tightening::UB ), + Tightening( 9, 0.0001, Tightening::LB ), Tightening( 9, 0.9907, Tightening::UB ), + Tightening( 10, 0.0001, Tightening::LB ), Tightening( 10, 0.9994, Tightening::UB ), + + Tightening( 11, 1.0013, Tightening::LB ), Tightening( 11, 4.9634, Tightening::UB ), + Tightening( 12, -0.9861, Tightening::LB ), Tightening( 12, 
2.9152, Tightening::UB ), + Tightening( 13, -2.9868, Tightening::LB ), Tightening( 13, 0.9678, Tightening::UB ), + + Tightening( 14, 0.1143, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ), + Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8694, Tightening::UB ), + Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4596, Tightening::UB ), + + Tightening( 17, 0.1143, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0.0001, Tightening::LB ), Tightening( 7, 0.9999, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 0.9991, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 0.9990, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9999, Tightening::UB ), + + Tightening( 11, 1.0003, Tightening::LB ), Tightening( 11, 4.9326, Tightening::UB ), + Tightening( 12, -0.9999, Tightening::LB ), Tightening( 12, 2.9979, Tightening::UB ), + Tightening( 13, -2.9990, Tightening::LB ), Tightening( 13, 0.9980, Tightening::UB ), + + Tightening( 14, 0.1067, Tightening::LB ), Tightening( 14, 0.9970, 
Tightening::UB ),
+ Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8786, Tightening::UB ),
+ Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4677, Tightening::UB ),
+
+ Tightening( 17, 0.1067, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ),
+ } );
+
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
+
+
+ // Invoke PMNR with gradient heuristic for neuron selection
+ TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+ List<Tightening> expectedBounds4( {} );
+
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+ }
+
+ void test_pmnr_gradient_relu_and_bilinear()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+ Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+ "backward-pmnr-gradient" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkBackwardReluAndBilinear( nlr, tableau );
+
+ tableau.setLowerBound( 0, 0 );
+ tableau.setUpperBound( 0, 1 );
+ tableau.setLowerBound( 1, 0 );
+ tableau.setUpperBound( 1, 1 );
+
+
+ // Invoke DeepPoly
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+ List<Tightening> expectedBounds( {
+ Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+ Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
+ Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
+
+ Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+ Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+ Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
+
+ Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ),
+ Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ),
+
+ Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ),
+
+ Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ),
+ } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+
+ // Invoke PMNR with gradient heuristic for neuron selection
+ TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+ List<Tightening> expectedBounds2( {} );
+
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+
+ // Change the current bounds
+ tableau.setLowerBound( 0, -3 );
+ tableau.setUpperBound( 0, 1 );
+ tableau.setLowerBound( 1, -1 );
+ tableau.setUpperBound( 1, 2 );
+
+ double large = 1000000;
+ tableau.setLowerBound( 2, -large );
+ tableau.setUpperBound( 2, large );
+ tableau.setLowerBound( 3, -large );
+ tableau.setUpperBound( 3, large );
+ tableau.setLowerBound( 4, -large );
+ tableau.setUpperBound( 4, large );
+ tableau.setLowerBound( 5, -large );
+ tableau.setUpperBound( 5, large );
+ tableau.setLowerBound( 6, -large );
+ tableau.setUpperBound( 6, large );
+ tableau.setLowerBound( 7, -large );
+ tableau.setUpperBound( 7, large );
+ tableau.setLowerBound( 8, -large );
+ tableau.setUpperBound( 8, large );
+ tableau.setLowerBound( 9, -large );
+ tableau.setUpperBound( 9,
large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), + Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), + + Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), + + Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_gradient_relu_and_bilinear2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReluAndBilinear2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ), + Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ), + + Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, 
expectedBounds ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ), + Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ), + + Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( 
bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_bbps_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ), + Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 8, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, 
Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ), + + Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 8, 0, Tightening::LB ), + Tightening( 9, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_bbps_relu2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ), + + Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ), + Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ), + + Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ), + + Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ), + + Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( 
nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + + Tightening( 19, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ), + Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ), + Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ), + + Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ), + Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, 
Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ), + Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ), + + Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + + Tightening( 20, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_bbps_sigmoid() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSigmoid( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0.2689, Tightening::LB ), Tightening( 4, 0.7311, Tightening::UB ), + Tightening( 5, 0.5, Tightening::LB ), Tightening( 5, 0.8808, Tightening::UB ), + + Tightening( 6, -0.1261, Tightening::LB ), Tightening( 6, 0.5069, Tightening::UB ), + Tightening( 7, -0.2379, Tightening::LB ), Tightening( 7, 0.8571, Tightening::UB ), + + Tightening( 8, 0.4685, Tightening::LB ), Tightening( 8, 0.6241, Tightening::UB ), + Tightening( 9, 0.4408, Tightening::LB ), Tightening( 9, 0.7021, Tightening::UB ), + + Tightening( 10, -1.1819, Tightening::LB ), Tightening( 10, -1.0535, Tightening::UB ), + Tightening( 11, 3.4986, Tightening::LB ), Tightening( 11, 3.8797, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); 
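+ // The remaining hidden and output variables (6 through 11) are widened to
+ // [-large, large] below, so the second DeepPoly pass must derive their
+ // bounds from scratch.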
+ tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0.0066, Tightening::LB ), Tightening( 4, 0.8807, Tightening::UB ), + Tightening( 5, 0.0179, Tightening::LB ), Tightening( 5, 0.9526, Tightening::UB ), + + Tightening( 6, -0.8362, Tightening::LB ), Tightening( 6, 0.9193, Tightening::UB ), + Tightening( 7, -0.8860, Tightening::LB ), Tightening( 7, 1.6904, Tightening::UB ), + + Tightening( 8, 0.3023, Tightening::LB ), Tightening( 8, 0.7148, Tightening::UB ), + Tightening( 9, 0.2919, Tightening::LB ), Tightening( 9, 0.8443, Tightening::UB ), + + Tightening( 10, -1.2694, Tightening::LB ), Tightening( 10, -0.8841, Tightening::UB ), + Tightening( 11, 3.2396, Tightening::LB ), Tightening( 11, 4.05, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_bbps_sigmoid2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSigmoid2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), + Tightening( 8, 0.0474, Tightening::LB ), Tightening( 8, 0.9526, Tightening::UB ), + Tightening( 9, 0.0474, Tightening::LB ), Tightening( 9, 0.9526, Tightening::UB ), + Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9975, Tightening::UB ), + + Tightening( 11, 1.3787, Tightening::LB ), Tightening( 11, 4.6213, Tightening::UB ), + Tightening( 12, -0.5636, Tightening::LB ), Tightening( 12, 2.5636, Tightening::UB ), + Tightening( 
13, -2.3771, Tightening::LB ), Tightening( 13, 0.3771, Tightening::UB ), + + Tightening( 14, 0.7988, Tightening::LB ), Tightening( 14, 0.9903, Tightening::UB ), + Tightening( 15, 0.3627, Tightening::LB ), Tightening( 15, 0.9285, Tightening::UB ), + Tightening( 16, 0.0849, Tightening::LB ), Tightening( 16, 0.5932, Tightening::UB ), + + Tightening( 17, -1.2113, Tightening::LB ), Tightening( 17, 0.0354, Tightening::UB ), + Tightening( 18, 1.4027, Tightening::LB ), Tightening( 18, 2.4177, Tightening::UB ), + + Tightening( 19, 0.2295, Tightening::LB ), Tightening( 19, 0.5088, Tightening::UB ), + Tightening( 20, 0.8026, Tightening::LB ), Tightening( 20, 0.9182, Tightening::UB ), + + Tightening( 21, -1.6539, Tightening::LB ), Tightening( 21, -1.3393, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0.1192, Tightening::LB ), 
Tightening( 7, 0.9933, Tightening::UB ), + Tightening( 8, 0.0067, Tightening::LB ), Tightening( 8, 0.9933, Tightening::UB ), + Tightening( 9, 0.0025, Tightening::LB ), Tightening( 9, 0.9933, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9991, Tightening::UB ), + + Tightening( 11, 1.2517, Tightening::LB ), Tightening( 11, 4.9701, Tightening::UB ), + Tightening( 12, -0.9599, Tightening::LB ), Tightening( 12, 2.8466, Tightening::UB ), + Tightening( 13, -2.8147, Tightening::LB ), Tightening( 13, 0.9188, Tightening::UB ), + + Tightening( 14, 0.7776, Tightening::LB ), Tightening( 14, 0.9931, Tightening::UB ), + Tightening( 15, 0.2769, Tightening::LB ), Tightening( 15, 0.9451, Tightening::UB ), + Tightening( 16, 0.0565, Tightening::LB ), Tightening( 16, 0.7148, Tightening::UB ), + + Tightening( 17, -1.4307, Tightening::LB ), Tightening( 17, 0.1083, Tightening::UB ), + Tightening( 18, 1.2592, Tightening::LB ), Tightening( 18, 2.5519, Tightening::UB ), + + Tightening( 19, 0.1929, Tightening::LB ), Tightening( 19, 0.5270, Tightening::UB ), + Tightening( 20, 0.7789, Tightening::LB ), Tightening( 20, 0.9277, Tightening::UB ), + + Tightening( 21, -1.6967, Tightening::LB ), Tightening( 21, -1.3115, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_bbps_abs() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardAbs( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, -4, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 2, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( 
nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 4, Tightening::UB ), + + Tightening( 6, -5, Tightening::LB ), Tightening( 6, 4, Tightening::UB ), + Tightening( 7, -4, Tightening::LB ), Tightening( 7, 10, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 10, Tightening::UB ), + + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 2, Tightening::LB ), Tightening( 11, 27, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_bbps_abs2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardAbs2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), 
+ Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 9, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), Tightening( 14, 9, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), + Tightening( 16, 0, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), + + Tightening( 17, -15, Tightening::LB ), Tightening( 17, 12, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 27, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 15, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), + + Tightening( 21, -28, Tightening::LB ), Tightening( 21, 14, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( 
nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 15, Tightening::UB ), + + Tightening( 11, -13, Tightening::LB ), Tightening( 11, 18, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 25, Tightening::UB ), + Tightening( 13, -7, Tightening::LB ), Tightening( 13, 15, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), Tightening( 14, 18, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), Tightening( 15, 25, Tightening::UB ), + Tightening( 16, 0, Tightening::LB ), Tightening( 16, 15, Tightening::UB ), + + Tightening( 17, -33, Tightening::LB ), Tightening( 17, 25, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 58, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 58, Tightening::UB ), + + Tightening( 21, -59, Tightening::LB ), Tightening( 21, 32, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_bbps_round() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardRound( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -4, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 6.5, Tightening::UB ), + } ); + + List 
bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 11, -4, Tightening::LB ), + Tightening( 11, 6, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -3, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, -10.5, Tightening::LB ), Tightening( 7, 5.5, Tightening::UB ), + + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -10, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + + Tightening( 10, -3, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + Tightening( 11, -15.5, Tightening::LB ), Tightening( 11, 11.5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, -10, Tightening::LB ), + Tightening( 7, 5, Tightening::UB ), + + Tightening( 11, -14, Tightening::LB ), + Tightening( 11, 11, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_bbps_round2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardRound2( nlr, 
tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 15, Tightening::UB ), + Tightening( 12, -10, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -7, Tightening::LB ), Tightening( 13, 3, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 15, Tightening::UB ), + Tightening( 15, -10, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -7, Tightening::LB ), Tightening( 16, 3, Tightening::UB ), + + Tightening( 17, -27.5, Tightening::LB ), Tightening( 17, 27.5, Tightening::UB ), + Tightening( 18, -16.5, Tightening::LB ), Tightening( 18, 16.5, Tightening::UB ), + + Tightening( 19, -28, Tightening::LB ), Tightening( 19, 28, Tightening::UB ), + Tightening( 20, -17, Tightening::LB ), Tightening( 20, 17, Tightening::UB ), + + Tightening( 21, -38, Tightening::LB ), Tightening( 21, 36, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); 
+ tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -13, Tightening::LB ), Tightening( 11, 30, Tightening::UB ), + Tightening( 12, -23, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), + + Tightening( 14, -13, Tightening::LB ), Tightening( 14, 30, Tightening::UB ), + Tightening( 15, -23, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), + Tightening( 16, -9, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), + + Tightening( 17, -57.5, Tightening::LB ), Tightening( 17, 32.5, Tightening::UB ), + Tightening( 18, -23.5, Tightening::LB ), Tightening( 18, 26.5, Tightening::UB ), + + Tightening( 19, -58, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), + Tightening( 20, -24, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), + + Tightening( 21, -74, Tightening::LB ), Tightening( 21, 44, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_bbps_sign() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, 
Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( 
boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_bbps_sign2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 
9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_bbps_leaky_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + 
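// The second input is boxed to [0, 1] as well. The tightened lower
+        // bounds expected below (e.g. -0.1 for variable 4) are consistent
+        // with a leaky-ReLU slope of 0.1, presumably set inside
+        // populateNetworkBackwardLeakyReLU.
+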
tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 4, -0.1, Tightening::LB ), + + Tightening( 8, -0.045, Tightening::LB ), + Tightening( 9, -0.3, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), + Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + + Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), + Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + + Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), + Tightening( 11, 
-14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
+
+
+        // Invoke PMNR with BBPS heuristic for neuron selection
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds4( {
+            Tightening( 4, -0.5, Tightening::LB ),
+            Tightening( 5, -0.4, Tightening::LB ),
+
+            Tightening( 8, -0.4571, Tightening::LB ),
+            Tightening( 9, -1.1057, Tightening::LB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+    }
+
+    void test_pmnr_bbps_leaky_relu2()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+                                   "backward-pmnr-bbps" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkBackwardLeakyReLU2( nlr, tableau );
+
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 1 );
+        tableau.setLowerBound( 2, -1 );
+        tableau.setUpperBound( 2, 1 );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds( {
+            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
+            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
+            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
+
+            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
+            Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
+            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
+            Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
+
+            Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ),
+            Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ),
+            Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ),
+
+            Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ),
+            Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ),
+            Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ),
+
+            Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ),
+            Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ),
+
+            Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ),
+            Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ),
+
+            Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+
+        // Invoke PMNR with BBPS heuristic for neuron selection
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds2( {
+            Tightening( 7, -0.2, Tightening::LB ),
+            Tightening( 8, -0.3, Tightening::LB ),
+
Tightening( 9, -0.3, Tightening::LB ), + Tightening( 10, -0.6, Tightening::LB ), + + Tightening( 14, -0.9, Tightening::LB ), + Tightening( 15, -0.89, Tightening::LB ), + Tightening( 16, -0.77, Tightening::LB ), + + Tightening( 19, -2.3133, Tightening::LB ), + Tightening( 20, -1.2, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ), + Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ), + Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ), + Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ), + Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ), + + Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ), + Tightening( 18, -19, 
Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ),
+
+            Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ),
+            Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ),
+
+            Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
+
+
+        // Invoke PMNR with BBPS heuristic for neuron selection
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds4( {
+            Tightening( 7, -0.2, Tightening::LB ),
+            Tightening( 8, -0.5, Tightening::LB ),
+            Tightening( 9, -0.6, Tightening::LB ),
+            Tightening( 10, -1.5, Tightening::LB ),
+
+            Tightening( 14, -1.1, Tightening::LB ),
+            Tightening( 15, -2.1771, Tightening::LB ),
+            Tightening( 16, -1.15, Tightening::LB ),
+
+            Tightening( 19, -5.6259, Tightening::LB ),
+            Tightening( 20, -1.9, Tightening::LB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+    }
+
+    void test_pmnr_bbps_softmax_and_max()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+                                   "backward-pmnr-bbps" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkBackwardSoftmaxAndMax( nlr, tableau );
+
+        tableau.setLowerBound( 0, 0 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, 0 );
+        tableau.setUpperBound( 1, 1 );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
+            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
+
+            Tightening( 3, 0.2119, Tightening::LB ), Tightening( 3, 0.8756, Tightening::UB ),
+            Tightening( 5, 0.0049, Tightening::LB ), Tightening( 5, 0.6652, Tightening::UB ),
+            Tightening( 7, 0.0634, Tightening::LB ), Tightening( 7, 0.4955, Tightening::UB ),
+
+            Tightening( 8, -0.1870, Tightening::LB ), Tightening( 8, 1.4488, Tightening::UB ),
+            Tightening( 9, 0.6814, Tightening::LB ), Tightening( 9, 2.2432, Tightening::UB ),
+
+            Tightening( 10, 0.6814, Tightening::LB ), Tightening( 10, 2.2432, Tightening::UB ),
+
+            Tightening( 11, -2.2432, Tightening::LB ), Tightening( 11, -0.6814, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+
+        // Invoke PMNR with BBPS heuristic for neuron selection
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds2( {} );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+
+        // Change the current bounds
+        tableau.setLowerBound( 0, -3 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 2 );
+
+        double large =
1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + + Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 9, 0.2194, Tightening::LB ), Tightening( 9, 2.9934, Tightening::UB ), + + Tightening( 10, 0.2194, Tightening::LB ), Tightening( 10, 2.9934, Tightening::UB ), + + Tightening( 11, -2.9934, Tightening::LB ), Tightening( 11, -0.2194, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_bbps_softmax_and_max2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSoftmaxAndMax2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0.0003, Tightening::LB ), Tightening( 7, 0.9864, Tightening::UB ), + Tightening( 8, 0.0001, Tightening::LB ), Tightening( 8, 0.9907, 
Tightening::UB ), + Tightening( 9, 0.0001, Tightening::LB ), Tightening( 9, 0.9907, Tightening::UB ), + Tightening( 10, 0.0001, Tightening::LB ), Tightening( 10, 0.9994, Tightening::UB ), + + Tightening( 11, 1.0013, Tightening::LB ), Tightening( 11, 4.9634, Tightening::UB ), + Tightening( 12, -0.9861, Tightening::LB ), Tightening( 12, 2.9152, Tightening::UB ), + Tightening( 13, -2.9868, Tightening::LB ), Tightening( 13, 0.9678, Tightening::UB ), + + Tightening( 14, 0.1143, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ), + Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8694, Tightening::UB ), + Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4596, Tightening::UB ), + + Tightening( 17, 0.1143, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0.0001, Tightening::LB ), Tightening( 7, 0.9999, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 0.9991, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 0.9990, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9999, Tightening::UB ), + + 
Tightening( 11, 1.0003, Tightening::LB ), Tightening( 11, 4.9326, Tightening::UB ), + Tightening( 12, -0.9999, Tightening::LB ), Tightening( 12, 2.9979, Tightening::UB ), + Tightening( 13, -2.9990, Tightening::LB ), Tightening( 13, 0.9980, Tightening::UB ), + + Tightening( 14, 0.1067, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ), + Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8786, Tightening::UB ), + Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4677, Tightening::UB ), + + Tightening( 17, 0.1067, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_bbps_relu_and_bilinear() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReluAndBilinear( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ), + + Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ), + + Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + 
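+        // Note: each intermediate variable is reset to the huge interval
+        // [-large, large] so that no tightening computed for the previous
+        // input box survives into the next propagation round.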
tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), + Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), + + Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), + + Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_bbps_relu_and_bilinear2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReluAndBilinear2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + + Tightening( 13, -4, Tightening::LB ), 
Tightening( 13, 8, Tightening::UB ), + Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ), + + Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ), + Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ), + + Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + 
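+        // Feeding the DeepPoly tightenings back into the tableau first means
+        // expectedBounds4 below lists only what PMNR with the BBPS heuristic
+        // proves beyond DeepPoly; e.g. updateTableau turns
+        // Tightening( 7, 0, Tightening::LB ) into tableau.setLowerBound( 7, 0 ).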
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + bool boundsEqual( const List &bounds, const List &expectedBounds ) + { + if ( bounds.size() < expectedBounds.size() ) + return false; + + bool allFound = true; + for ( const auto &bound : bounds ) + { + bool currentFound = false; + for ( const auto &expectedBound : expectedBounds ) + { + currentFound |= + ( bound._type == expectedBound._type && + bound._variable == expectedBound._variable && + FloatUtils::areEqual( bound._value, expectedBound._value, 0.0001 ) ); + } + allFound &= currentFound; + } + return allFound; + } + + // Create list of all tightenings in newBounds for which there is no bound in newBounds or in + // bounds which is at least as tight. + List removeRedundancies( const List &bounds, + const List &newBounds ) + { + List minimalBounds; + + for ( const auto &newBound : newBounds ) + { + bool foundTighter = false; + for ( const auto &bound : bounds ) + { + foundTighter |= + ( newBound._type == bound._type && newBound._variable == bound._variable && + ( ( newBound._type == Tightening::LB && + FloatUtils::lte( newBound._value, bound._value, 0.0001 ) ) || + ( newBound._type == Tightening::UB && + FloatUtils::gte( newBound._value, bound._value, 0.0001 ) ) ) ); + } + + for ( const auto &bound : newBounds ) + { + foundTighter |= + ( newBound._type == bound._type && newBound._variable == bound._variable && + ( ( newBound._type == Tightening::LB && + FloatUtils::lt( newBound._value, bound._value, 0.0001 ) ) || + ( newBound._type == Tightening::UB && + FloatUtils::gt( newBound._value, bound._value, 0.0001 ) ) ) ); + } + + if ( !foundTighter ) + minimalBounds.append( newBound ); + } + return minimalBounds; + } + + void updateTableau( MockTableau &tableau, List &tightenings ) + { + for ( const auto &tightening : tightenings ) + { + if ( tightening._type == Tightening::LB ) + { + tableau.setLowerBound( tightening._variable, tightening._value ); + } + + if ( tightening._type == Tightening::UB ) + { + tableau.setUpperBound( tightening._variable, tightening._value ); + } + } + } +}; From d5dd47ac498c6667080efc034a9c5bee2dfbe4cf Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Wed, 20 Aug 2025 22:35:02 +0300 Subject: [PATCH 68/81] Current progress in PMNR: 1. Corrections to initializeSymbolicBounds (in DeepPoly module), required by PMNR's selectConstraints. 2. Implemented unit tests for parameterisedSymbolicBoundPropagation and initializeSymbolicBounds, in Test_NetworkLevelReasoner.h and nlr/tests/Test_NetworkLevelReasoner.h. 3. Various corrections to PMNR's generatePolygonalTightening, getParameterisdPolygonalTighteningLowerBound and related functions (in NetworkLevelReasoner.cpp). 4. Renamings in Layer, LPFormulator, NetworkLevelReasoner for consistency. 5. Few corrections to nlr/tests/Test_NetworkLevelReasoner.h calculations (documentation). 6. One correction to Sigmoid regular SBT (changed if ( FloatUtils::areEqual( FloatUtils::round( sourceLb ), FloatUtils::round( sourceUb ) ) ) to if ( FloatUtils::areEqual( sourceLb, sourceUb ) ) in Layer.cpp's computeSymbolicBoundsForSigmoid. 7. 
One correction to DeepPoly module (added line _phaseFixed[i] = phaseFixed; in DeepPolyMaxPoolElement.cpp's execute() which is required by its storePredecessorSymbolicBounds() (otherwise the map _phaseFixed would be used without being updated). --- src/configuration/GlobalConfiguration.cpp | 2 +- src/engine/PolygonalTightening.h | 15 +- src/nlr/CMakeLists.txt | 2 +- src/nlr/DeepPolyAbsoluteValueElement.cpp | 16 +- src/nlr/DeepPolyAnalysis.cpp | 33 +- src/nlr/DeepPolyAnalysis.h | 36 +- src/nlr/DeepPolyBilinearElement.cpp | 78 +- src/nlr/DeepPolyElement.cpp | 95 +- src/nlr/DeepPolyElement.h | 42 +- src/nlr/DeepPolyLeakyReLUElement.cpp | 218 +- src/nlr/DeepPolyMaxPoolElement.cpp | 33 +- src/nlr/DeepPolyMaxPoolElement.h | 3 +- src/nlr/DeepPolyReLUElement.cpp | 159 +- src/nlr/DeepPolyRoundElement.cpp | 16 +- src/nlr/DeepPolySigmoidElement.cpp | 16 +- src/nlr/DeepPolySignElement.cpp | 146 +- src/nlr/DeepPolySoftmaxElement.cpp | 19 +- src/nlr/DeepPolyWeightedSumElement.cpp | 6 +- src/nlr/LPFormulator.cpp | 28 +- src/nlr/LPFormulator.h | 10 +- src/nlr/Layer.cpp | 80 +- src/nlr/NLRError.h | 3 +- src/nlr/NetworkLevelReasoner.cpp | 1198 +- src/nlr/NetworkLevelReasoner.h | 92 +- src/nlr/tests/Test_DeepPolyAnalysis.h | 58 - src/nlr/tests/Test_NetworkLevelReasoner.h | 1923 +++- src/nlr/tests/Test_PMNR.h | 11387 -------------------- src/nlr/tests/Test_PMNRSelection.h | 8821 +++++++++++++++ 28 files changed, 12024 insertions(+), 12511 deletions(-) delete mode 100644 src/nlr/tests/Test_PMNR.h create mode 100644 src/nlr/tests/Test_PMNRSelection.h diff --git a/src/configuration/GlobalConfiguration.cpp b/src/configuration/GlobalConfiguration.cpp index e5623a826c..bfd4304643 100644 --- a/src/configuration/GlobalConfiguration.cpp +++ b/src/configuration/GlobalConfiguration.cpp @@ -78,7 +78,7 @@ const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_WEIGHT_DEC const unsigned GlobalConfiguration::INVPROP_MAX_ITERATIONS = 25000; const double GlobalConfiguration::INVPROP_STEP_SIZE = 0.0025; -const double GlobalConfiguration::INVPROP_LEARNING_RATE = 0.025; +const double GlobalConfiguration::INVPROP_LEARNING_RATE = 0.25; const double GlobalConfiguration::INVPROP_WEIGHT_DECAY = 0; const double GlobalConfiguration::INVPROP_INITIAL_ALPHA = 0.5; const double GlobalConfiguration::INVPROP_INITIAL_GAMMA = 0.025; diff --git a/src/engine/PolygonalTightening.h b/src/engine/PolygonalTightening.h index e007f1bbb2..2a941c37f1 100644 --- a/src/engine/PolygonalTightening.h +++ b/src/engine/PolygonalTightening.h @@ -15,6 +15,7 @@ #ifndef __PolygonalTightening_h__ #define __PolygonalTightening_h__ +#include "FloatUtils.h" #include "Map.h" #include "NeuronIndex.h" @@ -88,14 +89,20 @@ class PolygonalTightening void dump() const { - printf( "PolygonalTightening: x %s %.2lf\n", _type == LB ? ">=" : "<=", _value ); + printf( "PolygonalTightening: " ); for ( const auto &pair : _neuronToCoefficient ) { - printf( "NeuronIndex: (layer %u, neuron %u), Coefficient: %.2lf )\n", + double coeff = pair.second; + if ( FloatUtils::isZero( coeff ) ) + continue; + + printf( "%s %.2lf neuron%u_%u ", + coeff > 0 ? "+" : "-", + FloatUtils::abs( coeff ), pair.first._layer, - pair.first._neuron, - pair.second ); + pair.first._neuron ); } + printf( "%s %.2lf\n", _type == LB ? 
">=" : "<=", _value ); } }; #endif // __PolygonalTightening_h__s diff --git a/src/nlr/CMakeLists.txt b/src/nlr/CMakeLists.txt index c70502cd15..b0bac42cc6 100644 --- a/src/nlr/CMakeLists.txt +++ b/src/nlr/CMakeLists.txt @@ -21,7 +21,7 @@ network_level_reasoner_add_unit_test(ParallelSolver) if (${ENABLE_GUROBI}) network_level_reasoner_add_unit_test(LPRelaxation) - network_level_reasoner_add_unit_test(PMNR) + network_level_reasoner_add_unit_test(PMNRSelection) endif() if (${BUILD_PYTHON}) diff --git a/src/nlr/DeepPolyAbsoluteValueElement.cpp b/src/nlr/DeepPolyAbsoluteValueElement.cpp index 73cfd5209c..7ad5fc4a6b 100644 --- a/src/nlr/DeepPolyAbsoluteValueElement.cpp +++ b/src/nlr/DeepPolyAbsoluteValueElement.cpp @@ -105,17 +105,15 @@ void DeepPolyAbsoluteValueElement::execute( void DeepPolyAbsoluteValueElement::storePredecessorSymbolicBounds() { - double *currentSymbolicLb = ( *_symbolicLbInTermsOfPredecessor )[_layerIndex]; - double *currentSymbolicUb = ( *_symbolicUbInTermsOfPredecessor )[_layerIndex]; - double *currentSymbolicLowerBias = ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex]; - double *currentSymbolicUpperBias = ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex]; - for ( unsigned i = 0; i < _size; ++i ) { - currentSymbolicLb[i * _size] = _symbolicLb[i]; - currentSymbolicUb[i * _size] = _symbolicUb[i]; - currentSymbolicLowerBias[i] = _symbolicLowerBias[i]; - currentSymbolicUpperBias[i] = _symbolicUpperBias[i]; + NeuronIndex sourceIndex = *( _layer->getActivationSources( i ).begin() ); + ( *_symbolicLbInTermsOfPredecessor )[_layerIndex][_size * sourceIndex._neuron + i] = + _symbolicLb[i]; + ( *_symbolicUbInTermsOfPredecessor )[_layerIndex][_size * sourceIndex._neuron + i] = + _symbolicUb[i]; + ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex][i] = _symbolicLowerBias[i]; + ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex][i] = _symbolicUpperBias[i]; } } diff --git a/src/nlr/DeepPolyAnalysis.cpp b/src/nlr/DeepPolyAnalysis.cpp index 8e80ed1774..7f94de75a8 100644 --- a/src/nlr/DeepPolyAnalysis.cpp +++ b/src/nlr/DeepPolyAnalysis.cpp @@ -39,20 +39,25 @@ namespace NLR { -DeepPolyAnalysis::DeepPolyAnalysis( LayerOwner *layerOwner, - bool storeOutputLayerSymbolicBounds, - bool storeSymbolicBoundsInTermsOfPredecessor, - Map *outputLayerSymbolicLb, - Map *outputLayerSymbolicUb, - Map *outputLayerSymbolicLowerBias, - Map *outputLayerSymbolicUpperBias, - Map *symbolicLbInTermsOfPredecessor, - Map *symbolicUbInTermsOfPredecessor, - Map *symbolicLowerBiasInTermsOfPredecessor, - Map *symbolicUpperBiasInTermsOfPredecessor ) +DeepPolyAnalysis::DeepPolyAnalysis( + LayerOwner *layerOwner, + bool storeOutputLayerSymbolicBounds, + bool storeSymbolicBoundsInTermsOfPredecessor, + bool useParameterisedSBT, + Map> *layerIndicesToParameters, + Map> *outputLayerSymbolicLb, + Map> *outputLayerSymbolicUb, + Map> *outputLayerSymbolicLowerBias, + Map> *outputLayerSymbolicUpperBias, + Map> *symbolicLbInTermsOfPredecessor, + Map> *symbolicUbInTermsOfPredecessor, + Map> *symbolicLowerBiasInTermsOfPredecessor, + Map> *symbolicUpperBiasInTermsOfPredecessor ) : _layerOwner( layerOwner ) , _storeOutputLayerSymbolicBounds( storeOutputLayerSymbolicBounds ) , _storeSymbolicBoundsInTermsOfPredecessor( storeSymbolicBoundsInTermsOfPredecessor ) + , _useParameterisedSBT( useParameterisedSBT ) + , _layerIndicesToParameters( layerIndicesToParameters ) , _work1SymbolicLb( NULL ) , _work1SymbolicUb( NULL ) , _work2SymbolicLb( NULL ) @@ -260,12 +265,18 @@ DeepPolyElement 
*DeepPolyAnalysis::createDeepPolyElement( Layer *layer ) throw NLRError( NLRError::LAYER_TYPE_NOT_SUPPORTED, Stringf( "Layer %u not yet supported", layer->getLayerType() ).ascii() ); + Map _layerIndexToLayer = _layerOwner->getLayerIndexToLayer(); + Layer *outputLayer = _layerIndexToLayer[_layerOwner->getNumberOfLayers() - 1]; + unsigned outputLayerSize = outputLayer->getSize(); + deepPolyElement->setOutputLayerSize( outputLayerSize ); deepPolyElement->setStoreSymbolicBoundsInTermsOfPredecessor( _storeSymbolicBoundsInTermsOfPredecessor ); if ( layer->getLayerIndex() == _layerOwner->getNumberOfLayers() - 1 ) { deepPolyElement->setStoreOutputLayerSymbolicBounds( _storeOutputLayerSymbolicBounds ); } + deepPolyElement->setUseParameterisedSBT( _useParameterisedSBT ); + deepPolyElement->setLayerIndicesToParameters( _layerIndicesToParameters ); deepPolyElement->setSymbolicBoundsMemory( _outputLayerSymbolicLb, _outputLayerSymbolicUb, _outputLayerSymbolicLowerBias, diff --git a/src/nlr/DeepPolyAnalysis.h b/src/nlr/DeepPolyAnalysis.h index 76eb25e69f..4e6ecdc791 100644 --- a/src/nlr/DeepPolyAnalysis.h +++ b/src/nlr/DeepPolyAnalysis.h @@ -31,14 +31,16 @@ class DeepPolyAnalysis DeepPolyAnalysis( LayerOwner *layerOwner, bool storeOutputLayerSymbolicBounds = false, bool storeSymbolicBoundsInTermsOfPredecessor = false, - Map *outputLayerSymbolicLb = NULL, - Map *outputLayerSymbolicUb = NULL, - Map *outputLayerSymbolicLowerBias = NULL, - Map *outputLayerSymbolicUpperBias = NULL, - Map *symbolicLbInTermsOfPredecessor = NULL, - Map *symbolicUbInTermsOfPredecessor = NULL, - Map *symbolicLowerBiasInTermsOfPredecessor = NULL, - Map *symbolicUpperBiasInTermsOfPredecessor = NULL ); + bool useParameterisedSBT = false, + Map> *layerIndicesToParameters = NULL, + Map> *outputLayerSymbolicLb = NULL, + Map> *outputLayerSymbolicUb = NULL, + Map> *outputLayerSymbolicLowerBias = NULL, + Map> *outputLayerSymbolicUpperBias = NULL, + Map> *symbolicLbInTermsOfPredecessor = NULL, + Map> *symbolicUbInTermsOfPredecessor = NULL, + Map> *symbolicLowerBiasInTermsOfPredecessor = NULL, + Map> *symbolicUpperBiasInTermsOfPredecessor = NULL ); ~DeepPolyAnalysis(); void run(); @@ -47,6 +49,8 @@ class DeepPolyAnalysis LayerOwner *_layerOwner; bool _storeOutputLayerSymbolicBounds; bool _storeSymbolicBoundsInTermsOfPredecessor; + bool _useParameterisedSBT; + Map> *_layerIndicesToParameters; /* Maps layer index to the abstract element @@ -63,15 +67,15 @@ class DeepPolyAnalysis double *_workSymbolicLowerBias; double *_workSymbolicUpperBias; - Map *_outputLayerSymbolicLb; - Map *_outputLayerSymbolicUb; - Map *_outputLayerSymbolicLowerBias; - Map *_outputLayerSymbolicUpperBias; + Map> *_outputLayerSymbolicLb; + Map> *_outputLayerSymbolicUb; + Map> *_outputLayerSymbolicLowerBias; + Map> *_outputLayerSymbolicUpperBias; - Map *_symbolicLbInTermsOfPredecessor; - Map *_symbolicUbInTermsOfPredecessor; - Map *_symbolicLowerBiasInTermsOfPredecessor; - Map *_symbolicUpperBiasInTermsOfPredecessor; + Map> *_symbolicLbInTermsOfPredecessor; + Map> *_symbolicUbInTermsOfPredecessor; + Map> *_symbolicLowerBiasInTermsOfPredecessor; + Map> *_symbolicUpperBiasInTermsOfPredecessor; unsigned _maxLayerSize; diff --git a/src/nlr/DeepPolyBilinearElement.cpp b/src/nlr/DeepPolyBilinearElement.cpp index 8583bdd99b..67505a0c17 100644 --- a/src/nlr/DeepPolyBilinearElement.cpp +++ b/src/nlr/DeepPolyBilinearElement.cpp @@ -89,19 +89,49 @@ void DeepPolyBilinearElement::execute( _lb[i] = std::max( lb, _lb[i] ); _ub[i] = std::min( ub, _ub[i] ); - // Symbolic lower bound: - // 
out >= alpha * x + beta * y + gamma - // where alpha = lb_y, beta = lb_x, gamma = -lb_x * lb_y - _symbolicLbA[i] = sourceLbs[1]; - _symbolicLbB[i] = sourceLbs[0]; - _symbolicLowerBias[i] = -sourceLbs[0] * sourceLbs[1]; - - // Symbolic upper bound: - // out <= alpha * x + beta * y + gamma - // where alpha = ub_y, beta = lb_x, gamma = -lb_x * ub_y - _symbolicUbA[i] = sourceUbs[1]; - _symbolicUbB[i] = sourceLbs[0]; - _symbolicUpperBias[i] = -sourceLbs[0] * sourceUbs[1]; + if ( !_useParameterisedSBT ) + { + // Symbolic lower bound: + // out >= alpha * x + beta * y + gamma + // where alpha = lb_y, beta = lb_x, gamma = -lb_x * lb_y + _symbolicLbA[i] = sourceLbs[1]; + _symbolicLbB[i] = sourceLbs[0]; + _symbolicLowerBias[i] = -sourceLbs[0] * sourceLbs[1]; + + // Symbolic upper bound: + // out <= alpha * x + beta * y + gamma + // where alpha = ub_y, beta = lb_x, gamma = -lb_x * ub_y + _symbolicUbA[i] = sourceUbs[1]; + _symbolicUbB[i] = sourceLbs[0]; + _symbolicUpperBias[i] = -sourceLbs[0] * sourceUbs[1]; + } + else + { + Vector coeffs = ( *_layerIndicesToParameters )[_layerIndex]; + ASSERT( coeffs.size() == 2 ); + ASSERT( coeffs[0] >= 0 && coeffs[0] <= 1 ); + ASSERT( coeffs[1] >= 0 && coeffs[1] <= 1 ); + // Bilinear linear relaxation (arXiv:2405.21063v2 [cs.LG]) + // Lower bound: out >= aLower * x + bLower * y + c_l, where + // aLower = alpha1 * l_y + ( 1 - alpha1 ) * u_y + // bLower = alpha1 * l_x + ( 1 - alpha1 ) * u_x + // c_l = -alpha1 * l_x * l_y - ( 1 - alpha1 ) * u_x * u_y + + // Upper bound: out <= aUpper * x + bUpper * y + c_u, where + // aUpper = alpha2 * u_y + ( 1 - alpha2 ) * l_y + // bUpper = alpha2 * l_x + ( 1 - alpha2 ) * u_x + // c_u = -alpha2 * l_x * u_y - ( 1 - alpha2 ) * u_x * l_y + + _symbolicLbA[i] = coeffs[0] * sourceLbs[1] + ( 1 - coeffs[0] ) * sourceUbs[1]; + _symbolicUbA[i] = coeffs[1] * sourceUbs[1] + ( 1 - coeffs[1] ) * sourceLbs[1]; + _symbolicLowerBias[i] = -coeffs[0] * sourceLbs[0] * sourceLbs[1] - + ( 1 - coeffs[0] ) * sourceUbs[0] * sourceUbs[1]; + + _symbolicLbB[i] = coeffs[0] * sourceLbs[0] + ( 1 - coeffs[0] ) * sourceUbs[0]; + _symbolicUbB[i] = coeffs[1] * sourceLbs[0] + ( 1 - coeffs[1] ) * sourceUbs[0]; + _symbolicUpperBias[i] = -coeffs[1] * sourceLbs[0] * sourceUbs[1] - + ( 1 - coeffs[1] ) * sourceUbs[0] * sourceLbs[1]; + } } if ( _storeSymbolicBoundsInTermsOfPredecessor ) @@ -122,19 +152,19 @@ void DeepPolyBilinearElement::execute( void DeepPolyBilinearElement::storePredecessorSymbolicBounds() { - double *currentSymbolicLb = ( *_symbolicLbInTermsOfPredecessor )[_layerIndex]; - double *currentSymbolicUb = ( *_symbolicUbInTermsOfPredecessor )[_layerIndex]; - double *currentSymbolicLowerBias = ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex]; - double *currentSymbolicUpperBias = ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex]; - for ( unsigned i = 0; i < _size; ++i ) { - currentSymbolicLb[i * _size] = _symbolicLbA[i]; - currentSymbolicUb[i * _size] = _symbolicUbA[i]; - currentSymbolicLb[i * _size + 1] = _symbolicLbB[i]; - currentSymbolicUb[i * _size + 1] = _symbolicUbB[i]; - currentSymbolicLowerBias[i] = _symbolicLowerBias[i]; - currentSymbolicUpperBias[i] = _symbolicUpperBias[i]; + List sources = _layer->getActivationSources( i ); + ( *_symbolicLbInTermsOfPredecessor )[_layerIndex][_size * sources.begin()->_neuron + i] = + _symbolicLbA[i]; + ( *_symbolicUbInTermsOfPredecessor )[_layerIndex][_size * sources.begin()->_neuron + i] = + _symbolicUbA[i]; + ( *_symbolicLbInTermsOfPredecessor )[_layerIndex][_size * sources.back()._neuron + i] = +
_symbolicLbB[i]; + ( *_symbolicUbInTermsOfPredecessor )[_layerIndex][_size * sources.back()._neuron + i] = + _symbolicUbB[i]; + ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex][i] = _symbolicLowerBias[i]; + ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex][i] = _symbolicUpperBias[i]; } } diff --git a/src/nlr/DeepPolyElement.cpp b/src/nlr/DeepPolyElement.cpp index 8f21cd30e4..3e7f1c9dc2 100644 --- a/src/nlr/DeepPolyElement.cpp +++ b/src/nlr/DeepPolyElement.cpp @@ -23,6 +23,9 @@ DeepPolyElement::DeepPolyElement() , _layerIndex( 0 ) , _storeOutputLayerSymbolicBounds( false ) , _storeSymbolicBoundsInTermsOfPredecessor( false ) + , _useParameterisedSBT( false ) + , _layerIndicesToParameters( NULL ) + , _outputLayerSize( 0 ) , _symbolicLb( NULL ) , _symbolicUb( NULL ) , _symbolicLowerBias( NULL ) @@ -105,6 +108,22 @@ void DeepPolyElement::setStoreSymbolicBoundsInTermsOfPredecessor( _storeSymbolicBoundsInTermsOfPredecessor = storeSymbolicBoundsInTermsOfPredecessor; } +void DeepPolyElement::setUseParameterisedSBT( bool useParameterisedSBT ) +{ + _useParameterisedSBT = useParameterisedSBT; +} + +void DeepPolyElement::setLayerIndicesToParameters( + Map> *layerIndicesToParameters ) +{ + _layerIndicesToParameters = layerIndicesToParameters; +} + +void DeepPolyElement::setOutputLayerSize( unsigned outputLayerSize ) +{ + _outputLayerSize = outputLayerSize; +} + double DeepPolyElement::getLowerBoundFromLayer( unsigned index ) const { ASSERT( index < getSize() ); @@ -170,14 +189,14 @@ void DeepPolyElement::setWorkingMemory( double *work1SymbolicLb, } void DeepPolyElement::setSymbolicBoundsMemory( - Map *outputLayerSymbolicLb, - Map *outputLayerSymbolicUb, - Map *outputLayerSymbolicLowerBias, - Map *outputLayerSymbolicUpperBias, - Map *symbolicLbInTermsOfPredecessor, - Map *symbolicUbInTermsOfPredecessor, - Map *symbolicLowerBiasInTermsOfPredecessor, - Map *symbolicUpperBiasInTermsOfPredecessor ) + Map> *outputLayerSymbolicLb, + Map> *outputLayerSymbolicUb, + Map> *outputLayerSymbolicLowerBias, + Map> *outputLayerSymbolicUpperBias, + Map> *symbolicLbInTermsOfPredecessor, + Map> *symbolicUbInTermsOfPredecessor, + Map> *symbolicLowerBiasInTermsOfPredecessor, + Map> *symbolicUpperBiasInTermsOfPredecessor ) { _outputLayerSymbolicLb = outputLayerSymbolicLb; _outputLayerSymbolicUb = outputLayerSymbolicUb; @@ -190,7 +209,6 @@ void DeepPolyElement::setSymbolicBoundsMemory( } void DeepPolyElement::storeOutputSymbolicBounds( - unsigned sourceLayerSize, double *work1SymbolicLb, double *work1SymbolicUb, double *workSymbolicLowerBias, @@ -200,10 +218,28 @@ void DeepPolyElement::storeOutputSymbolicBounds( Set &residualLayerIndices, const Map &deepPolyElementsBefore ) { - double *workSymbolicLowerBias2 = new double[_size]; - double *workSymbolicUpperBias2 = new double[_size]; - memcpy( workSymbolicLowerBias2, workSymbolicLowerBias, _size * sizeof( double ) ); - memcpy( workSymbolicUpperBias2, workSymbolicUpperBias, _size * sizeof( double ) ); + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _layer->neuronEliminated( i ) ) + { + double value = _layer->getEliminatedNeuronValue( i ); + for ( unsigned j = 0; j < _outputLayerSize; ++j ) + { + workSymbolicLowerBias[i] += work1SymbolicLb[i * _size + j] * value; + workSymbolicUpperBias[i] += work1SymbolicUb[i * _size + j] * value; + work1SymbolicLb[i * _size + j] = 0; + work1SymbolicUb[i * _size + j] = 0; + } + } + } + + Vector symbolicLowerBiasConcretizedResiduals( _outputLayerSize, 0 ); + Vector symbolicUpperBiasConcretizedResiduals( _outputLayerSize, 0 ); + 
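+    // The residual handling below concretizes each residual layer's symbolic
+    // coefficients against that layer's concrete bounds and folds the result
+    // into these bias vectors, so the stored coefficient matrices remain
+    // expressed over the current layer's neurons alone.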
for ( unsigned i = 0; i < _outputLayerSize; ++i ) + { + symbolicLowerBiasConcretizedResiduals[i] = workSymbolicLowerBias[i]; + symbolicUpperBiasConcretizedResiduals[i] = workSymbolicUpperBias[i]; + } for ( const auto &residualLayerIndex : residualLayerIndices ) { @@ -219,52 +255,45 @@ void DeepPolyElement::storeOutputSymbolicBounds( double sourceUb = residualElement->getUpperBoundFromLayer( i ) + GlobalConfiguration::SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT; - for ( unsigned j = 0; j < _size; ++j ) + for ( unsigned j = 0; j < _outputLayerSize; ++j ) { // Compute lower bound double weight = currentResidualLb[i * _size + j]; if ( weight >= 0 ) { - workSymbolicLowerBias2[j] += ( weight * sourceLb ); + symbolicLowerBiasConcretizedResiduals[j] += ( weight * sourceLb ); } else { - workSymbolicLowerBias2[j] += ( weight * sourceUb ); + symbolicLowerBiasConcretizedResiduals[j] += ( weight * sourceUb ); } // Compute upper bound weight = currentResidualUb[i * _size + j]; if ( weight >= 0 ) { - workSymbolicUpperBias2[j] += ( weight * sourceUb ); + symbolicUpperBiasConcretizedResiduals[j] += ( weight * sourceUb ); } else { - workSymbolicUpperBias2[j] += ( weight * sourceLb ); + symbolicUpperBiasConcretizedResiduals[j] += ( weight * sourceLb ); } } } } - double *currentSymbolicLb = ( *_outputLayerSymbolicLb )[_layerIndex]; - double *currentSymbolicUb = ( *_outputLayerSymbolicUb )[_layerIndex]; - double *currentSymbolicLowerBias = ( *_outputLayerSymbolicLowerBias )[_layerIndex]; - double *currentSymbolicUpperBias = ( *_outputLayerSymbolicUpperBias )[_layerIndex]; - memcpy( currentSymbolicLb, work1SymbolicLb, _size * sourceLayerSize * sizeof( double ) ); - memcpy( currentSymbolicUb, work1SymbolicUb, _size * sourceLayerSize * sizeof( double ) ); - memcpy( currentSymbolicLowerBias, workSymbolicLowerBias2, _size * sizeof( double ) ); - memcpy( currentSymbolicUpperBias, workSymbolicUpperBias2, _size * sizeof( double ) ); - - if ( workSymbolicLowerBias2 ) + for ( unsigned i = 0; i < _size * _outputLayerSize; ++i ) { - delete[] workSymbolicLowerBias2; - workSymbolicLowerBias2 = NULL; + ( *_outputLayerSymbolicLb )[_layerIndex][i] = work1SymbolicLb[i]; + ( *_outputLayerSymbolicUb )[_layerIndex][i] = work1SymbolicUb[i]; } - if ( workSymbolicUpperBias2 ) + for ( unsigned i = 0; i < _outputLayerSize; ++i ) { - delete[] workSymbolicUpperBias2; - workSymbolicUpperBias2 = NULL; + ( *_outputLayerSymbolicLowerBias )[_layerIndex][i] = + symbolicLowerBiasConcretizedResiduals[i]; + ( *_outputLayerSymbolicUpperBias )[_layerIndex][i] = + symbolicUpperBiasConcretizedResiduals[i]; } } diff --git a/src/nlr/DeepPolyElement.h b/src/nlr/DeepPolyElement.h index 2fbd0b0a19..d6da8f0d57 100644 --- a/src/nlr/DeepPolyElement.h +++ b/src/nlr/DeepPolyElement.h @@ -71,6 +71,9 @@ class DeepPolyElement void setStoreOutputLayerSymbolicBounds( bool storeOutputLayerSymbolicBounds ); void setStoreSymbolicBoundsInTermsOfPredecessor( bool storeSymbolicBoundsInTermsOfPredecessor ); + void setUseParameterisedSBT( bool useParameterisedSBT ); + void setLayerIndicesToParameters( Map> *layerIndicesToParameters ); + void setOutputLayerSize( unsigned outputLayerSize ); void setWorkingMemory( double *work1SymbolicLb, double *work1SymbolicUb, @@ -79,18 +82,18 @@ class DeepPolyElement double *workSymbolicLowerBias, double *workSymbolicUpperBias ); - void setSymbolicBoundsMemory( Map *outputLayerSymbolicLb, - Map *outputLayerSymbolicUb, - Map *outputLayerSymbolicLowerBias, - Map *outputLayerSymbolicUpperBias, - Map *symbolicLbInTermsOfPredecessor, - Map 
*symbolicUbInTermsOfPredecessor, - Map *symbolicLowerBiasInTermsOfPredecessor, - Map *symbolicUpperBiasInTermsOfPredecessor ); + void + setSymbolicBoundsMemory( Map> *outputLayerSymbolicLb, + Map> *outputLayerSymbolicUb, + Map> *outputLayerSymbolicLowerBias, + Map> *outputLayerSymbolicUpperBias, + Map> *symbolicLbInTermsOfPredecessor, + Map> *symbolicUbInTermsOfPredecessor, + Map> *symbolicLowerBiasInTermsOfPredecessor, + Map> *symbolicUpperBiasInTermsOfPredecessor ); void - storeOutputSymbolicBounds( unsigned sourceLayerSize, - double *work1SymbolicLb, + storeOutputSymbolicBounds( double *work1SymbolicLb, double *work1SymbolicUb, double *workSymbolicLowerBias, double *workSymbolicUpperBias, @@ -108,6 +111,9 @@ class DeepPolyElement unsigned _layerIndex; bool _storeOutputLayerSymbolicBounds; bool _storeSymbolicBoundsInTermsOfPredecessor; + bool _useParameterisedSBT; + Map> *_layerIndicesToParameters; + unsigned _outputLayerSize; /* Abstract element described in @@ -128,15 +134,15 @@ class DeepPolyElement double *_workSymbolicLowerBias; double *_workSymbolicUpperBias; - Map *_outputLayerSymbolicLb; - Map *_outputLayerSymbolicUb; - Map *_outputLayerSymbolicLowerBias; - Map *_outputLayerSymbolicUpperBias; + Map> *_outputLayerSymbolicLb; + Map> *_outputLayerSymbolicUb; + Map> *_outputLayerSymbolicLowerBias; + Map> *_outputLayerSymbolicUpperBias; - Map *_symbolicLbInTermsOfPredecessor; - Map *_symbolicUbInTermsOfPredecessor; - Map *_symbolicLowerBiasInTermsOfPredecessor; - Map *_symbolicUpperBiasInTermsOfPredecessor; + Map> *_symbolicLbInTermsOfPredecessor; + Map> *_symbolicUbInTermsOfPredecessor; + Map> *_symbolicLowerBiasInTermsOfPredecessor; + Map> *_symbolicUpperBiasInTermsOfPredecessor; void allocateMemory(); void freeMemoryIfNeeded(); diff --git a/src/nlr/DeepPolyLeakyReLUElement.cpp b/src/nlr/DeepPolyLeakyReLUElement.cpp index ea4ef5a555..3bc3358abf 100644 --- a/src/nlr/DeepPolyLeakyReLUElement.cpp +++ b/src/nlr/DeepPolyLeakyReLUElement.cpp @@ -50,87 +50,165 @@ void DeepPolyLeakyReLUElement::execute( double sourceLb = predecessor->getLowerBound( sourceIndex._neuron ); double sourceUb = predecessor->getUpperBound( sourceIndex._neuron ); - if ( !FloatUtils::isNegative( sourceLb ) ) + if ( !_useParameterisedSBT ) { - // Phase active - // Symbolic bound: x_b <= x_f <= x_b - // Concrete bound: lb_b <= x_f <= ub_b - _symbolicUb[i] = 1; - _symbolicUpperBias[i] = 0; - _ub[i] = sourceUb; - - _symbolicLb[i] = 1; - _symbolicLowerBias[i] = 0; - _lb[i] = sourceLb; - } - else if ( !FloatUtils::isPositive( sourceUb ) ) - { - // Phase inactive - // Symbolic bound: slope * x_b <= x_f <= slope * x_b - // Concrete bound: slope * lb_b <= x_f <= slope * ub_b - _symbolicUb[i] = _slope; - _symbolicUpperBias[i] = 0; - _ub[i] = _slope * sourceUb; - - _symbolicLb[i] = _slope; - _symbolicLowerBias[i] = 0; - _lb[i] = _slope * sourceLb; - } - else - { - // LeakyReLU not fixed - // Symbolic upper bound: x_f <= (x_b - l) * u / ( u - l) - // Concrete upper bound: x_f <= ub_b - double width = sourceUb - sourceLb; - double coeff = ( sourceUb - _slope * sourceLb ) / width; - - if ( _slope <= 1 ) + if ( !FloatUtils::isNegative( sourceLb ) ) { - _symbolicUb[i] = coeff; - _symbolicUpperBias[i] = ( ( _slope - 1 ) * sourceUb * sourceLb ) / width; + // Phase active + // Symbolic bound: x_b <= x_f <= x_b + // Concrete bound: lb_b <= x_f <= ub_b + _symbolicUb[i] = 1; + _symbolicUpperBias[i] = 0; _ub[i] = sourceUb; - // For the lower bound, in general, x_f >= lambda * x_b, where - // 0 <= lambda <= 1, would be a sound lower bound. 
We - // use the heuristic described in section 4.1 of - // https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf - // to set the value of lambda (either 0 or 1 is considered). - if ( sourceUb > sourceLb ) + _symbolicLb[i] = 1; + _symbolicLowerBias[i] = 0; + _lb[i] = sourceLb; + } + else if ( !FloatUtils::isPositive( sourceUb ) ) + { + // Phase inactive + // Symbolic bound: slope * x_b <= x_f <= slope * x_b + // Concrete bound: slope * lb_b <= x_f <= slope * ub_b + _symbolicUb[i] = _slope; + _symbolicUpperBias[i] = 0; + _ub[i] = _slope * sourceUb; + + _symbolicLb[i] = _slope; + _symbolicLowerBias[i] = 0; + _lb[i] = _slope * sourceLb; + } + else + { + // LeakyReLU not fixed + // Symbolic upper bound: x_f <= (x_b - l) * u / ( u - l) + // Concrete upper bound: x_f <= ub_b + double width = sourceUb - sourceLb; + double weight = ( sourceUb - _slope * sourceLb ) / width; + + if ( _slope <= 1 ) { - // lambda = 1 - // Symbolic lower bound: x_f >= x_b - // Concrete lower bound: x_f >= sourceLb - _symbolicLb[i] = 1; - _symbolicLowerBias[i] = 0; - _lb[i] = sourceLb; + _symbolicUb[i] = weight; + _symbolicUpperBias[i] = ( ( _slope - 1 ) * sourceUb * sourceLb ) / width; + _ub[i] = sourceUb; + + // For the lower bound, in general, x_f >= lambda * x_b, where + // 0 <= lambda <= 1, would be a sound lower bound. We + // use the heuristic described in section 4.1 of + // https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + // to set the value of lambda (either 0 or 1 is considered). + if ( sourceUb > sourceLb ) + { + // lambda = 1 + // Symbolic lower bound: x_f >= x_b + // Concrete lower bound: x_f >= sourceLb + _symbolicLb[i] = 1; + _symbolicLowerBias[i] = 0; + _lb[i] = sourceLb; + } + else + { + // lambda = slope + // Symbolic lower bound: x_f >= slope * x_b + // Concrete lower bound: x_f >= slope * lb_b + _symbolicLb[i] = _slope; + _symbolicLowerBias[i] = 0; + _lb[i] = _slope * sourceLb; + } } else { - // lambda = 1 - // Symbolic lower bound: x_f >= 0 - // Concrete lower bound: x_f >= 0 - _symbolicLb[i] = _slope; - _symbolicLowerBias[i] = 0; + _symbolicLb[i] = weight; + _symbolicLowerBias[i] = ( ( _slope - 1 ) * sourceUb * sourceLb ) / width; _lb[i] = _slope * sourceLb; + + if ( sourceUb > sourceLb ) + { + _symbolicUb[i] = 1; + _symbolicUpperBias[i] = 0; + _ub[i] = sourceUb; + } + else + { + _symbolicUb[i] = _slope; + _symbolicUpperBias[i] = 0; + _ub[i] = _slope * sourceUb; + } } } - else + } + else + { + Vector coeffs = ( *_layerIndicesToParameters )[_layerIndex]; + ASSERT( coeffs.size() == 1 ); + double coeff = coeffs[0]; + ASSERT( coeff >= 0 && coeff <= 1 ); + + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // Phase active + // Symbolic bound: x_b <= x_f <= x_b + // Concrete bound: lb_b <= x_f <= ub_b + _symbolicUb[i] = 1; + _symbolicUpperBias[i] = 0; + _ub[i] = sourceUb; + + _symbolicLb[i] = 1; + _symbolicLowerBias[i] = 0; + _lb[i] = sourceLb; + } + else if ( !FloatUtils::isPositive( sourceUb ) ) { - _symbolicLb[i] = coeff; - _symbolicLowerBias[i] = ( ( _slope - 1 ) * sourceUb * sourceLb ) / width; + // Phase inactive + // Symbolic bound: slope * x_b <= x_f <= slope * x_b + // Concrete bound: slope * lb_b <= x_f <= slope * ub_b + _symbolicUb[i] = _slope; + _symbolicUpperBias[i] = 0; + _ub[i] = _slope * sourceUb; + + _symbolicLb[i] = _slope; + _symbolicLowerBias[i] = 0; _lb[i] = _slope * sourceLb; + } + else + { + // LeakyReLU not fixed + // Symbolic upper bound: x_f <= (x_b - l) * u / ( u - l) + // Concrete upper bound: x_f <= ub_b + double width = sourceUb - sourceLb; + double weight = ( sourceUb - _slope *
sourceLb ) / width; - - if ( _slope <= 1 ) + + if ( _slope <= 1 ) { - _symbolicUb[i] = coeff; - _symbolicUpperBias[i] = ( ( _slope - 1 ) * sourceUb * sourceLb ) / width; + _symbolicUb[i] = weight; + _symbolicUpperBias[i] = ( ( _slope - 1 ) * sourceUb * sourceLb ) / width; _ub[i] = sourceUb; + + // For the lower bound, in general, x_f >= lambda * x_b, where + // 0 <= lambda <= 1, would be a sound lower bound. We + // use the heuristic described in section 4.1 of + // https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + // to set the value of lambda (either 0 or 1 is considered). + + // lambda = ( ( 1 - _slope ) * coeff + _slope ) + // Symbolic lower bound: x_f >= ( ( 1 - _slope ) * coeff + _slope ) * x_b + // Concrete lower bound: x_f >= ( ( 1 - _slope ) * coeff + _slope ) * sourceLb + _symbolicLb[i] = ( ( 1 - _slope ) * coeff + _slope ); + _symbolicLowerBias[i] = 0; + _lb[i] = ( ( 1 - _slope ) * coeff + _slope ) * sourceLb; } else { - _symbolicUb[i] = 1; - _symbolicUpperBias[i] = 0; + _symbolicLb[i] = weight; + _symbolicLowerBias[i] = ( ( _slope - 1 ) * sourceUb * sourceLb ) / width; + _lb[i] = _slope * sourceLb; + + // lambda = ( ( 1 - _slope ) * coeff + _slope ) + // Symbolic upper bound: x_f <= ( ( 1 - _slope ) * coeff + _slope ) * x_b + // Concrete upper bound: x_f <= ( ( 1 - _slope ) * coeff + _slope ) * sourceUb + _symbolicUb[i] = ( ( 1 - _slope ) * coeff + _slope ); + _symbolicUpperBias[i] = 0; + _ub[i] = ( ( 1 - _slope ) * coeff + _slope ) * sourceUb; } } } @@ -154,17 +232,15 @@ void DeepPolyLeakyReLUElement::execute( void DeepPolyLeakyReLUElement::storePredecessorSymbolicBounds() { - double *currentSymbolicLb = ( *_symbolicLbInTermsOfPredecessor )[_layerIndex]; - double *currentSymbolicUb = ( *_symbolicUbInTermsOfPredecessor )[_layerIndex]; - double *currentSymbolicLowerBias = ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex]; - double *currentSymbolicUpperBias = ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex]; - for ( unsigned i = 0; i < _size; ++i ) { - currentSymbolicLb[i * _size] = _symbolicLb[i]; - currentSymbolicUb[i * _size] = _symbolicUb[i]; - currentSymbolicLowerBias[i] = _symbolicLowerBias[i]; - currentSymbolicUpperBias[i] = _symbolicUpperBias[i]; + NeuronIndex sourceIndex = *( _layer->getActivationSources( i ).begin() ); + ( *_symbolicLbInTermsOfPredecessor )[_layerIndex][_size * sourceIndex._neuron + i] = + _symbolicLb[i]; + ( *_symbolicUbInTermsOfPredecessor )[_layerIndex][_size * sourceIndex._neuron + i] = + _symbolicUb[i]; + ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex][i] = _symbolicLowerBias[i]; + ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex][i] = _symbolicUpperBias[i]; } } diff --git a/src/nlr/DeepPolyMaxPoolElement.cpp b/src/nlr/DeepPolyMaxPoolElement.cpp index 89c7921a46..cdc5b195b5 100644 --- a/src/nlr/DeepPolyMaxPoolElement.cpp +++ b/src/nlr/DeepPolyMaxPoolElement.cpp @@ -40,7 +40,6 @@ void DeepPolyMaxPoolElement::execute( // Update the symbolic and concrete upper- and lower- bounds // of each neuron - Vector phasesFixed( _size ); Vector maxLowerBoundIndices( _size ); Vector maxUpperBounds( _size ); for ( unsigned i = 0; i < _size; ++i ) @@ -86,7 +85,7 @@ void DeepPolyMaxPoolElement::execute( } } - phasesFixed[i] = phaseFixed ?
1 : 0; + _phaseFixed[i] = phaseFixed; maxLowerBoundIndices[i] = indexOfMaxLowerBound._neuron; maxUpperBounds[i] = maxUpperBound; @@ -120,37 +119,35 @@ void DeepPolyMaxPoolElement::execute( if ( _storeSymbolicBoundsInTermsOfPredecessor ) { - storePredecessorSymbolicBounds( phasesFixed, maxLowerBoundIndices, maxUpperBounds ); + storePredecessorSymbolicBounds( maxLowerBoundIndices, maxUpperBounds ); } log( "Executing - done" ); } void DeepPolyMaxPoolElement::storePredecessorSymbolicBounds( - const Vector &phaseFixed, const Vector &indexOfMaxLowerBound, const Vector &maxUpperBound ) { - double *currentSymbolicLb = ( *_symbolicLbInTermsOfPredecessor )[_layerIndex]; - double *currentSymbolicUb = ( *_symbolicUbInTermsOfPredecessor )[_layerIndex]; - double *currentSymbolicLowerBias = ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex]; - double *currentSymbolicUpperBias = ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex]; - for ( unsigned i = 0; i < _size; ++i ) { - if ( phaseFixed[i] > 0 ) + if ( _phaseFixed[i] > 0 ) { - currentSymbolicLb[i * _size + indexOfMaxLowerBound[i]] = 1; - currentSymbolicUb[i * _size + indexOfMaxLowerBound[i]] = 1; - currentSymbolicLowerBias[i] = 0; - currentSymbolicUpperBias[i] = 0; + ( *_symbolicLbInTermsOfPredecessor )[_layerIndex][_size * indexOfMaxLowerBound[i] + i] = + 1; + ( *_symbolicUbInTermsOfPredecessor )[_layerIndex][_size * indexOfMaxLowerBound[i] + i] = + 1; + ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex][i] = 0; + ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex][i] = 0; } else { - currentSymbolicLb[i * _size + indexOfMaxLowerBound[i]] = 1; - currentSymbolicUb[i * _size + indexOfMaxLowerBound[i]] = 0; - currentSymbolicLowerBias[i] = 0; - currentSymbolicUpperBias[i] = maxUpperBound[i]; + ( *_symbolicLbInTermsOfPredecessor )[_layerIndex][_size * indexOfMaxLowerBound[i] + i] = + 1; + ( *_symbolicUbInTermsOfPredecessor )[_layerIndex][_size * indexOfMaxLowerBound[i] + i] = + 0; + ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex][i] = 0; + ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex][i] = maxUpperBound[i]; } } } diff --git a/src/nlr/DeepPolyMaxPoolElement.h b/src/nlr/DeepPolyMaxPoolElement.h index 2573ac8c4b..b772104081 100644 --- a/src/nlr/DeepPolyMaxPoolElement.h +++ b/src/nlr/DeepPolyMaxPoolElement.h @@ -34,8 +34,7 @@ class DeepPolyMaxPoolElement : public DeepPolyElement void execute( const Map &deepPolyElementsBefore ); - void storePredecessorSymbolicBounds( const Vector &phaseFixed, - const Vector &indexOfMaxLowerBound, + void storePredecessorSymbolicBounds( const Vector &indexOfMaxLowerBound, const Vector &maxUpperBound ); void symbolicBoundInTermsOfPredecessor( const double *symbolicLb, diff --git a/src/nlr/DeepPolyReLUElement.cpp b/src/nlr/DeepPolyReLUElement.cpp index aa2e6ef80d..290c2250f4 100644 --- a/src/nlr/DeepPolyReLUElement.cpp +++ b/src/nlr/DeepPolyReLUElement.cpp @@ -46,65 +46,118 @@ void DeepPolyReLUElement::execute( const Map &deepP double sourceLb = predecessor->getLowerBound( sourceIndex._neuron ); double sourceUb = predecessor->getUpperBound( sourceIndex._neuron ); - if ( !FloatUtils::isNegative( sourceLb ) ) + if ( !_useParameterisedSBT ) { - // Phase active - // Symbolic bound: x_b <= x_f <= x_b - // Concrete bound: lb_b <= x_f <= ub_b - _symbolicUb[i] = 1; - _symbolicUpperBias[i] = 0; - _ub[i] = sourceUb; - - _symbolicLb[i] = 1; - _symbolicLowerBias[i] = 0; - _lb[i] = sourceLb; - } - else if ( !FloatUtils::isPositive( sourceUb ) ) - { - // Phase inactive - // Symbolic bound: 0 <= x_f <= 0 
- // Concrete bound: 0 <= x_f <= 0 - _symbolicUb[i] = 0; - _symbolicUpperBias[i] = 0; - _ub[i] = 0; - - _symbolicLb[i] = 0; - _symbolicLowerBias[i] = 0; - _lb[i] = 0; + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // Phase active + // Symbolic bound: x_b <= x_f <= x_b + // Concrete bound: lb_b <= x_f <= ub_b + _symbolicUb[i] = 1; + _symbolicUpperBias[i] = 0; + _ub[i] = sourceUb; + + _symbolicLb[i] = 1; + _symbolicLowerBias[i] = 0; + _lb[i] = sourceLb; + } + else if ( !FloatUtils::isPositive( sourceUb ) ) + { + // Phase inactive + // Symbolic bound: 0 <= x_f <= 0 + // Concrete bound: 0 <= x_f <= 0 + _symbolicUb[i] = 0; + _symbolicUpperBias[i] = 0; + _ub[i] = 0; + + _symbolicLb[i] = 0; + _symbolicLowerBias[i] = 0; + _lb[i] = 0; + } + else + { + // ReLU not fixed + // Symbolic upper bound: x_f <= (x_b - l) * u / ( u - l) + // Concrete upper bound: x_f <= ub_b + double weight = sourceUb / ( sourceUb - sourceLb ); + _symbolicUb[i] = weight; + _symbolicUpperBias[i] = -sourceLb * weight; + _ub[i] = sourceUb; + + // For the lower bound, in general, x_f >= lambda * x_b, where + // 0 <= lambda <= 1, would be a sound lower bound. We + // use the heuristic described in section 4.1 of + // https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + // to set the value of lambda (either 0 or 1 is considered). + if ( sourceUb > -sourceLb ) + { + // lambda = 1 + // Symbolic lower bound: x_f >= x_b + // Concrete lower bound: x_f >= sourceLb + _symbolicLb[i] = 1; + _symbolicLowerBias[i] = 0; + _lb[i] = sourceLb; + } + else + { + // lambda = 1 + // Symbolic lower bound: x_f >= 0 + // Concrete lower bound: x_f >= 0 + _symbolicLb[i] = 0; + _symbolicLowerBias[i] = 0; + _lb[i] = 0; + } + } } else { - // ReLU not fixed - // Symbolic upper bound: x_f <= (x_b - l) * u / ( u - l) - // Concrete upper bound: x_f <= ub_b - double coeff = sourceUb / ( sourceUb - sourceLb ); - _symbolicUb[i] = coeff; - _symbolicUpperBias[i] = -sourceLb * coeff; - _ub[i] = sourceUb; - - // For the lower bound, in general, x_f >= lambda * x_b, where - // 0 <= lambda <= 1, would be a sound lower bound. We - // use the heuristic described in section 4.1 of - // https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf - // to set the value of lambda (either 0 or 1 is considered). 
- if ( sourceUb > -sourceLb ) + Vector coeffs = ( *_layerIndicesToParameters )[_layerIndex]; + ASSERT( coeffs.size() == 1 ); + double coeff = coeffs[0]; + ASSERT( coeff >= 0 && coeff <= 1 ); + if ( !FloatUtils::isNegative( sourceLb ) ) { - // lambda = 1 - // Symbolic lower bound: x_f >= x_b - // Concrete lower bound: x_f >= sourceLb + // Phase active + // Symbolic bound: x_b <= x_f <= x_b + // Concrete bound: lb_b <= x_f <= ub_b + _symbolicUb[i] = 1; + _symbolicUpperBias[i] = 0; + _ub[i] = sourceUb; + _symbolicLb[i] = 1; _symbolicLowerBias[i] = 0; _lb[i] = sourceLb; } - else + else if ( !FloatUtils::isPositive( sourceUb ) ) { - // lambda = 1 - // Symbolic lower bound: x_f >= 0 - // Concrete lower bound: x_f >= 0 + // Phase inactive + // Symbolic bound: 0 <= x_f <= 0 + // Concrete bound: 0 <= x_f <= 0 + _symbolicUb[i] = 0; + _symbolicUpperBias[i] = 0; + _ub[i] = 0; + _symbolicLb[i] = 0; _symbolicLowerBias[i] = 0; _lb[i] = 0; } + else + { + // ReLU not fixed + // Symbolic upper bound: x_f <= (x_b - l) * u / ( u - l) + // Concrete upper bound: x_f <= ub_b + double weight = sourceUb / ( sourceUb - sourceLb ); + _symbolicUb[i] = weight; + _symbolicUpperBias[i] = -sourceLb * weight; + _ub[i] = sourceUb; + + // For the lower bound, in general, x_f >= lambda * x_b, where + // 0 <= lambda <= 1, would be a sound lower bound. We use coeff + // to set the value of lambda. + _symbolicLb[i] = coeff; + _symbolicLowerBias[i] = 0; + _lb[i] = coeff * sourceLb; + } } log( Stringf( "Neuron%u LB: %f b + %f, UB: %f b + %f", i, @@ -125,17 +178,15 @@ void DeepPolyReLUElement::execute( const Map &deepP void DeepPolyReLUElement::storePredecessorSymbolicBounds() { - double *currentSymbolicLb = ( *_symbolicLbInTermsOfPredecessor )[_layerIndex]; - double *currentSymbolicUb = ( *_symbolicUbInTermsOfPredecessor )[_layerIndex]; - double *currentSymbolicLowerBias = ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex]; - double *currentSymbolicUpperBias = ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex]; - for ( unsigned i = 0; i < _size; ++i ) { - currentSymbolicLb[i * _size] = _symbolicLb[i]; - currentSymbolicUb[i * _size] = _symbolicUb[i]; - currentSymbolicLowerBias[i] = _symbolicLowerBias[i]; - currentSymbolicUpperBias[i] = _symbolicUpperBias[i]; + NeuronIndex sourceIndex = *( _layer->getActivationSources( i ).begin() ); + ( *_symbolicLbInTermsOfPredecessor )[_layerIndex][_size * sourceIndex._neuron + i] = + _symbolicLb[i]; + ( *_symbolicUbInTermsOfPredecessor )[_layerIndex][_size * sourceIndex._neuron + i] = + _symbolicUb[i]; + ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex][i] = _symbolicLowerBias[i]; + ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex][i] = _symbolicUpperBias[i]; } } diff --git a/src/nlr/DeepPolyRoundElement.cpp b/src/nlr/DeepPolyRoundElement.cpp index a0ab150d09..754783bd59 100644 --- a/src/nlr/DeepPolyRoundElement.cpp +++ b/src/nlr/DeepPolyRoundElement.cpp @@ -93,17 +93,15 @@ void DeepPolyRoundElement::execute( const Map &deep void DeepPolyRoundElement::storePredecessorSymbolicBounds() { - double *currentSymbolicLb = ( *_symbolicLbInTermsOfPredecessor )[_layerIndex]; - double *currentSymbolicUb = ( *_symbolicUbInTermsOfPredecessor )[_layerIndex]; - double *currentSymbolicLowerBias = ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex]; - double *currentSymbolicUpperBias = ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex]; - for ( unsigned i = 0; i < _size; ++i ) { - currentSymbolicLb[i * _size] = _symbolicLb[i]; - currentSymbolicUb[i * _size] = _symbolicUb[i]; 
- currentSymbolicLowerBias[i] = _symbolicLowerBias[i]; - currentSymbolicUpperBias[i] = _symbolicUpperBias[i]; + NeuronIndex sourceIndex = *( _layer->getActivationSources( i ).begin() ); + ( *_symbolicLbInTermsOfPredecessor )[_layerIndex][_size * sourceIndex._neuron + i] = + _symbolicLb[i]; + ( *_symbolicUbInTermsOfPredecessor )[_layerIndex][_size * sourceIndex._neuron + i] = + _symbolicUb[i]; + ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex][i] = _symbolicLowerBias[i]; + ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex][i] = _symbolicUpperBias[i]; } } diff --git a/src/nlr/DeepPolySigmoidElement.cpp b/src/nlr/DeepPolySigmoidElement.cpp index 419536564c..6cc924a98f 100644 --- a/src/nlr/DeepPolySigmoidElement.cpp +++ b/src/nlr/DeepPolySigmoidElement.cpp @@ -110,17 +110,15 @@ void DeepPolySigmoidElement::execute( void DeepPolySigmoidElement::storePredecessorSymbolicBounds() { - double *currentSymbolicLb = ( *_symbolicLbInTermsOfPredecessor )[_layerIndex]; - double *currentSymbolicUb = ( *_symbolicUbInTermsOfPredecessor )[_layerIndex]; - double *currentSymbolicLowerBias = ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex]; - double *currentSymbolicUpperBias = ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex]; - for ( unsigned i = 0; i < _size; ++i ) { - currentSymbolicLb[i * _size] = _symbolicLb[i]; - currentSymbolicUb[i * _size] = _symbolicUb[i]; - currentSymbolicLowerBias[i] = _symbolicLowerBias[i]; - currentSymbolicUpperBias[i] = _symbolicUpperBias[i]; + NeuronIndex sourceIndex = *( _layer->getActivationSources( i ).begin() ); + ( *_symbolicLbInTermsOfPredecessor )[_layerIndex][_size * sourceIndex._neuron + i] = + _symbolicLb[i]; + ( *_symbolicUbInTermsOfPredecessor )[_layerIndex][_size * sourceIndex._neuron + i] = + _symbolicUb[i]; + ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex][i] = _symbolicLowerBias[i]; + ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex][i] = _symbolicUpperBias[i]; } } diff --git a/src/nlr/DeepPolySignElement.cpp b/src/nlr/DeepPolySignElement.cpp index d0a070c4f7..26d9ee26a1 100644 --- a/src/nlr/DeepPolySignElement.cpp +++ b/src/nlr/DeepPolySignElement.cpp @@ -46,47 +46,103 @@ void DeepPolySignElement::execute( const Map &deepP double sourceLb = predecessor->getLowerBound( sourceIndex._neuron ); double sourceUb = predecessor->getUpperBound( sourceIndex._neuron ); - if ( !FloatUtils::isNegative( sourceLb ) ) + if ( !_useParameterisedSBT ) { - // Phase positive - // Symbolic bound: 1 <= x_f <= 1 - // Concrete bound: 1 <= x_f <= 1 - _symbolicUb[i] = 0; - _symbolicUpperBias[i] = 1; - _ub[i] = 1; - - _symbolicLb[i] = 0; - _symbolicLowerBias[i] = 1; - _lb[i] = 1; - } - else if ( FloatUtils::isNegative( sourceUb ) ) - { - // Phase negative - // Symbolic bound: -1 <= x_f <= -1 - // Concrete bound: -1 <= x_f <= -1 - _symbolicUb[i] = 0; - _symbolicUpperBias[i] = -1; - _ub[i] = -1; - - _symbolicLb[i] = 0; - _symbolicLowerBias[i] = -1; - _lb[i] = -1; + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // Phase positive + // Symbolic bound: 1 <= x_f <= 1 + // Concrete bound: 1 <= x_f <= 1 + _symbolicUb[i] = 0; + _symbolicUpperBias[i] = 1; + _ub[i] = 1; + + _symbolicLb[i] = 0; + _symbolicLowerBias[i] = 1; + _lb[i] = 1; + } + else if ( FloatUtils::isNegative( sourceUb ) ) + { + // Phase negative + // Symbolic bound: -1 <= x_f <= -1 + // Concrete bound: -1 <= x_f <= -1 + _symbolicUb[i] = 0; + _symbolicUpperBias[i] = -1; + _ub[i] = -1; + + _symbolicLb[i] = 0; + _symbolicLowerBias[i] = -1; + _lb[i] = -1; + } + else + { + // Sign not fixed + // 
Use the relaxation defined in https://arxiv.org/pdf/2011.02948.pdf + // Symbolic upper bound: x_f <= -2 / l * x_b + 1 + // Concrete upper bound: x_f <= 1 + _symbolicUb[i] = -2 / sourceLb; + _symbolicUpperBias[i] = 1; + _ub[i] = 1; + + // Symbolic lower bound: x_f >= (2 / u) * x_b - 1 + // Concrete lower bound: x_f >= -1 + _symbolicLb[i] = 2 / sourceUb; + _symbolicLowerBias[i] = -1; + _lb[i] = -1; + } } else { - // Sign not fixed - // Use the relaxation defined in https://arxiv.org/pdf/2011.02948.pdf - // Symbolic upper bound: x_f <= -2 / l * x_b + 1 - // Concrete upper bound: x_f <= 1 - _symbolicUb[i] = -2 / sourceLb; - _symbolicUpperBias[i] = 1; - _ub[i] = 1; - - // Symbolic lower bound: x_f >= (2 / u) * x_b - 1 - // Concrete lower bound: x_f >= -1 - _symbolicLb[i] = 2 / sourceUb; - _symbolicLowerBias[i] = -1; - _lb[i] = -1; + Vector coeffs = ( *_layerIndicesToParameters )[_layerIndex]; + ASSERT( coeffs.size() == 2 ); + ASSERT( coeffs[0] >= 0 && coeffs[0] <= 1 ); + ASSERT( coeffs[1] >= 0 && coeffs[1] <= 1 ); + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // Phase positive + // Symbolic bound: 1 <= x_f <= 1 + // Concrete bound: 1 <= x_f <= 1 + _symbolicUb[i] = 0; + _symbolicUpperBias[i] = 1; + _ub[i] = 1; + + _symbolicLb[i] = 0; + _symbolicLowerBias[i] = 1; + _lb[i] = 1; + } + else if ( FloatUtils::isNegative( sourceUb ) ) + { + // Phase negative + // Symbolic bound: -1 <= x_f <= -1 + // Concrete bound: -1 <= x_f <= -1 + _symbolicUb[i] = 0; + _symbolicUpperBias[i] = -1; + _ub[i] = -1; + + _symbolicLb[i] = 0; + _symbolicLowerBias[i] = -1; + _lb[i] = -1; + } + else + { + // Sign not fixed + // The upper bound's phase is not fixed, use parameterised + // parallelogram approximation: y <= - 2 / l * coeffs[0] * x + 1 + // (varies continuously between y <= 1 and y <= -2 / l * x + 1). + // Concrete upper bound: x_f <= 1 + _symbolicUb[i] = -2.0 / sourceLb * coeffs[0]; + _symbolicUpperBias[i] = 1; + _ub[i] = 1; + + // The lower bound's phase is not fixed, use parameterised + // parallelogram approximation: y >= 2 / u * coeffs[1] * x - 1 + // (varies continuously between y >= -1 and y >= 2 / u * x - 1). 
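// --------------------------------------------------------------------
// Illustrative sketch, not part of the patch: a soundness check of the
// parameterised sign relaxation above at the interval endpoints. With
// l < 0 < u, the upper line -2/l * a0 * x + 1 equals 1 at x = 0 and
// 1 - 2 * a0 >= -1 at x = l, so it stays above sign(x) on [l, u] for
// any a0 in [0, 1]; the lower line is symmetric. Hypothetical names.

#include <cassert>

void parameterisedSignRelaxation( double lb, double ub, double a0, double a1,
                                  double &ubSlope, double &ubBias,
                                  double &lbSlope, double &lbBias )
{
    assert( lb < 0 && ub > 0 );
    assert( a0 >= 0 && a0 <= 1 && a1 >= 0 && a1 <= 1 );
    ubSlope = -2.0 / lb * a0; // a0 = 0: y <= 1; a0 = 1: the paper's line
    ubBias = 1;
    lbSlope = 2.0 / ub * a1;  // a1 = 0: y >= -1; a1 = 1: the paper's line
    lbBias = -1;
}
// --------------------------------------------------------------------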
+                // Symbolic lower bound: x_f >= (2 / u) * coeffs[1] * x_b - 1
+                // Concrete lower bound: x_f >= -1
+                _symbolicLb[i] = 2.0 / sourceUb * coeffs[1];
+                _symbolicLowerBias[i] = -1;
+                _lb[i] = -1;
+            }
         }
 
         log( Stringf( "Neuron%u LB: %f b + %f, UB: %f b + %f",
                       i,
@@ -107,17 +163,15 @@ void DeepPolySignElement::execute( const Map &deepP
 
 void DeepPolySignElement::storePredecessorSymbolicBounds()
 {
-    double *currentSymbolicLb = ( *_symbolicLbInTermsOfPredecessor )[_layerIndex];
-    double *currentSymbolicUb = ( *_symbolicUbInTermsOfPredecessor )[_layerIndex];
-    double *currentSymbolicLowerBias = ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex];
-    double *currentSymbolicUpperBias = ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex];
-
     for ( unsigned i = 0; i < _size; ++i )
     {
-        currentSymbolicLb[i * _size] = _symbolicLb[i];
-        currentSymbolicUb[i * _size] = _symbolicUb[i];
-        currentSymbolicLowerBias[i] = _symbolicLowerBias[i];
-        currentSymbolicUpperBias[i] = _symbolicUpperBias[i];
+        NeuronIndex sourceIndex = *( _layer->getActivationSources( i ).begin() );
+        ( *_symbolicLbInTermsOfPredecessor )[_layerIndex][_size * sourceIndex._neuron + i] =
+            _symbolicLb[i];
+        ( *_symbolicUbInTermsOfPredecessor )[_layerIndex][_size * sourceIndex._neuron + i] =
+            _symbolicUb[i];
+        ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex][i] = _symbolicLowerBias[i];
+        ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex][i] = _symbolicUpperBias[i];
     }
 }
diff --git a/src/nlr/DeepPolySoftmaxElement.cpp b/src/nlr/DeepPolySoftmaxElement.cpp
index e61f990b66..1966bf8b4d 100644
--- a/src/nlr/DeepPolySoftmaxElement.cpp
+++ b/src/nlr/DeepPolySoftmaxElement.cpp
@@ -199,14 +199,17 @@ void DeepPolySoftmaxElement::execute(
 
 void DeepPolySoftmaxElement::storePredecessorSymbolicBounds()
 {
-    double *currentSymbolicLb = ( *_symbolicLbInTermsOfPredecessor )[_layerIndex];
-    double *currentSymbolicUb = ( *_symbolicUbInTermsOfPredecessor )[_layerIndex];
-    double *currentSymbolicLowerBias = ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex];
-    double *currentSymbolicUpperBias = ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex];
-    memcpy( currentSymbolicLb, _symbolicLb, _size * _size * sizeof( double ) );
-    memcpy( currentSymbolicUb, _symbolicUb, _size * _size * sizeof( double ) );
-    memcpy( currentSymbolicLowerBias, _symbolicLowerBias, _size * sizeof( double ) );
-    memcpy( currentSymbolicUpperBias, _symbolicUpperBias, _size * sizeof( double ) );
+    for ( unsigned i = 0; i < _size * _size; ++i )
+    {
+        ( *_symbolicLbInTermsOfPredecessor )[_layerIndex][i] = _symbolicLb[i];
+        ( *_symbolicUbInTermsOfPredecessor )[_layerIndex][i] = _symbolicUb[i];
+    }
+
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex][i] = _symbolicLowerBias[i];
+        ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex][i] = _symbolicUpperBias[i];
+    }
 }
 
 void DeepPolySoftmaxElement::symbolicBoundInTermsOfPredecessor(
diff --git a/src/nlr/DeepPolyWeightedSumElement.cpp b/src/nlr/DeepPolyWeightedSumElement.cpp
index 14362a21d4..b1130f6fa5 100644
--- a/src/nlr/DeepPolyWeightedSumElement.cpp
+++ b/src/nlr/DeepPolyWeightedSumElement.cpp
@@ -105,8 +105,7 @@ void DeepPolyWeightedSumElement::computeBoundWithBackSubstitution(
 
     if ( _storeOutputLayerSymbolicBounds )
     {
-        precedingElement->storeOutputSymbolicBounds( sourceLayerSize,
-                                                     _work1SymbolicLb,
+        precedingElement->storeOutputSymbolicBounds( _work1SymbolicLb,
                                                      _work1SymbolicUb,
                                                      _workSymbolicLowerBias,
                                                      _workSymbolicUpperBias,
@@ -246,8 +245,7 @@ void
DeepPolyWeightedSumElement::computeBoundWithBackSubstitution( if ( _storeOutputLayerSymbolicBounds ) { - precedingElement->storeOutputSymbolicBounds( currentElement->getSize(), - _work1SymbolicLb, + precedingElement->storeOutputSymbolicBounds( _work1SymbolicLb, _work1SymbolicUb, _workSymbolicLowerBias, _workSymbolicUpperBias, diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp index f7964de418..67f2725155 100644 --- a/src/nlr/LPFormulator.cpp +++ b/src/nlr/LPFormulator.cpp @@ -252,7 +252,7 @@ void LPFormulator::optimizeBoundsWithLpRelaxation( const Map &layers, bool backward, const Map> &layerIndicesToParameters, - const Vector &polygonal_tightenings ) + const Vector &polygonalTightenings ) { unsigned numberOfWorkers = Options::get()->getInt( Options::NUM_WORKERS ); @@ -306,7 +306,7 @@ void LPFormulator::optimizeBoundsWithLpRelaxation( // optimize every neuron of layer optimizeBoundsOfNeuronsWithLpRelaxation( - argument, backward, layerIndicesToParameters, polygonal_tightenings ); + argument, backward, layerIndicesToParameters, polygonalTightenings ); LPFormulator_LOG( Stringf( "Tightening bound for layer %u - done", layerIndex ).ascii() ); } @@ -417,7 +417,7 @@ void LPFormulator::optimizeBoundsOfNeuronsWithLpRelaxation( ThreadArgument &args, bool backward, const Map> &layerIndicesToParameters, - const Vector &polygonal_tightenings ) + const Vector &polygonalTightenings ) { unsigned numberOfWorkers = Options::get()->getInt( Options::NUM_WORKERS ); @@ -534,13 +534,13 @@ void LPFormulator::optimizeBoundsOfNeuronsWithLpRelaxation( *freeSolver, lastIndexOfRelaxation, layerIndicesToParameters, - polygonal_tightenings ); + polygonalTightenings ); else createLPRelaxation( layers, *freeSolver, lastIndexOfRelaxation, layerIndicesToParameters, - polygonal_tightenings ); + polygonalTightenings ); mtx.unlock(); // spawn a thread to tighten the bounds for the current variable @@ -665,7 +665,7 @@ void LPFormulator::createLPRelaxation( GurobiWrapper &gurobi, unsigned lastLayer, const Map> &layerIndicesToParameters, - const Vector &polygonal_tightenings ) + const Vector &polygonalTightenings ) { for ( const auto &layer : layers ) { @@ -681,7 +681,7 @@ void LPFormulator::createLPRelaxation( addLayerToParameterisedModel( gurobi, layer.second, false, currentLayerCoeffs ); } } - addPolyognalTighteningsToLpRelaxation( gurobi, layers, 0, lastLayer, polygonal_tightenings ); + addPolyognalTighteningsToLpRelaxation( gurobi, layers, 0, lastLayer, polygonalTightenings ); } void LPFormulator::createLPRelaxationAfter( @@ -689,7 +689,7 @@ void LPFormulator::createLPRelaxationAfter( GurobiWrapper &gurobi, unsigned firstLayer, const Map> &layerIndicesToParameters, - const Vector &polygonal_tightenings ) + const Vector &polygonalTightenings ) { unsigned depth = GlobalConfiguration::BACKWARD_BOUND_PROPAGATION_DEPTH; std::priority_queue, std::greater> layersToAdd; @@ -726,7 +726,7 @@ void LPFormulator::createLPRelaxationAfter( } } addPolyognalTighteningsToLpRelaxation( - gurobi, layers, firstLayer, layersToAdd.top(), polygonal_tightenings ); + gurobi, layers, firstLayer, layersToAdd.top(), polygonalTightenings ); } void LPFormulator::addLayerToModel( GurobiWrapper &gurobi, @@ -2150,25 +2150,25 @@ void LPFormulator::addPolyognalTighteningsToLpRelaxation( const Map &layers, unsigned firstLayer, unsigned lastLayer, - const Vector &polygonal_tightenings ) + const Vector &polygonalTightenings ) { List terms; - for ( const auto &tightening : polygonal_tightenings ) + for ( const auto &tightening : 
polygonalTightenings ) { Map neuronToCoefficient = tightening._neuronToCoefficient; PolygonalTightening::PolygonalBoundType type = tightening._type; double value = tightening._value; - bool out_of_bounds = false; + bool outOfBounds = false; for ( const auto &pair : neuronToCoefficient ) { unsigned currentLayerIndex = pair.first._layer; if ( currentLayerIndex < firstLayer || currentLayerIndex > lastLayer ) { - out_of_bounds = true; + outOfBounds = true; } } - if ( out_of_bounds ) + if ( outOfBounds ) { continue; } diff --git a/src/nlr/LPFormulator.h b/src/nlr/LPFormulator.h index 76e0cd0b10..6b4ef5c7b3 100644 --- a/src/nlr/LPFormulator.h +++ b/src/nlr/LPFormulator.h @@ -57,7 +57,7 @@ class LPFormulator : public ParallelSolver bool backward = false, const Map> &layerIndicesToParameters = Map>(), - const Vector &polygonal_tightenings = + const Vector &polygonalTightenings = Vector( {} ) ); void optimizeBoundsWithPreimageApproximation( Map &layers ); void optimizeBoundsWithInvprop( Map &layers ); @@ -84,14 +84,14 @@ class LPFormulator : public ParallelSolver unsigned lastLayer = UINT_MAX, const Map> &layerIndicesToParameters = Map>(), - const Vector &polygonal_tightenings = + const Vector &polygonalTightenings = Vector( {} ) ); void createLPRelaxationAfter( const Map &layers, GurobiWrapper &gurobi, unsigned firstLayer, const Map> &layerIndicesToParameters = Map>(), - const Vector &polygonal_tightenings = + const Vector &polygonalTightenings = Vector( {} ) ); double solveLPRelaxation( GurobiWrapper &gurobi, const Map &layers, @@ -149,7 +149,7 @@ class LPFormulator : public ParallelSolver bool backward, const Map> &layerIndicesToParameters = Map>(), - const Vector &polygonal_tightenings = + const Vector &polygonalTightenings = Vector( {} ) ); @@ -184,7 +184,7 @@ class LPFormulator : public ParallelSolver const Map &layers, unsigned firstLayer, unsigned lastLayer, - const Vector &polygonal_tightenings ); + const Vector &polygonalTightenings ); /* Optimize for the min/max value of variableName with respect to the constraints diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index 62af5e8e91..43f24e341b 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -936,23 +936,23 @@ void Layer::computeIntervalArithmeticBoundsForSign() double lb = sourceLayer->getLb( sourceIndex._neuron ); double ub = sourceLayer->getUb( sourceIndex._neuron ); - double new_lb; - double new_ub; + double newLb; + double newUb; if ( !FloatUtils::isNegative( lb ) ) { - new_lb = 1; - new_ub = 1; + newLb = 1; + newUb = 1; } else if ( FloatUtils::isNegative( ub ) ) { - new_lb = -1; - new_ub = -1; + newLb = -1; + newUb = -1; } else { - new_lb = -1; - new_ub = 1; + newLb = -1; + newUb = 1; } /* @@ -960,16 +960,16 @@ void Layer::computeIntervalArithmeticBoundsForSign() variable. If they are tigheter than what was previously known, store them. 
*/
-        if ( _lb[i] < new_lb )
+        if ( _lb[i] < newLb )
         {
-            _lb[i] = new_lb;
+            _lb[i] = newLb;
             _layerOwner->receiveTighterBound(
                 Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
         }
 
-        if ( _ub[i] > new_ub )
+        if ( _ub[i] > newUb )
        {
-            _ub[i] = new_ub;
+            _ub[i] = newUb;
             _layerOwner->receiveTighterBound(
                 Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
         }
@@ -4160,57 +4160,65 @@ void Layer::computeParameterisedSymbolicBoundsForBilinear( const Vector
         }
 
         // Billinear linear relaxation (arXiv:2405.21063v2 [cs.LG])
-        // Lower bound: out >= a_l * x + b_l * y + c_l, where
-        //             a_l = alpha1 * l_y + ( 1 - alpha1 ) * u_y
-        //             b_l = alpha1 * l_x + ( 1 - alpha1 ) * u_x
-        //             c_l = -alpha1 * l_x * l_y - ( 1 - alpha1 ) * u_x * u_y
+        // Lower bound: out >= aLower * x + bLower * y + cLower, where
+        //             aLower = alpha1 * l_y + ( 1 - alpha1 ) * u_y
+        //             bLower = alpha1 * l_x + ( 1 - alpha1 ) * u_x
+        //             cLower = -alpha1 * l_x * l_y - ( 1 - alpha1 ) * u_x * u_y
 
-        // Upper bound: out <= a_u * x + b_u * y + c_u, where
-        //             a_u = alpha2 * u_y + ( 1 - alpha2 ) * l_y
-        //             b_u = alpha2 * l_x + ( 1 - alpha2 ) * u_x
-        //             c_u = -alpha2 * -l_x * u_y - ( 1 - alpha2 ) * u_x * l_y
+        // Upper bound: out <= aUpper * x + bUpper * y + cUpper, where
+        //             aUpper = alpha2 * u_y + ( 1 - alpha2 ) * l_y
+        //             bUpper = alpha2 * l_x + ( 1 - alpha2 ) * u_x
+        //             cUpper = -alpha2 * l_x * u_y - ( 1 - alpha2 ) * u_x * l_y
 
-        double a_l = coeffs[0] * sourceLbs[1] + ( 1 - coeffs[0] ) * sourceUbs[1];
-        double a_u = coeffs[1] * sourceUbs[1] + ( 1 - coeffs[1] ) * sourceLbs[1];
-        double b_l = coeffs[0] * sourceLbs[0] + ( 1 - coeffs[0] ) * sourceUbs[0];
-        double b_u = coeffs[1] * sourceLbs[0] + ( 1 - coeffs[1] ) * sourceUbs[0];
+        double aLower = coeffs[0] * sourceLbs[1] + ( 1 - coeffs[0] ) * sourceUbs[1];
+        double aUpper = coeffs[1] * sourceUbs[1] + ( 1 - coeffs[1] ) * sourceLbs[1];
+        double bLower = coeffs[0] * sourceLbs[0] + ( 1 - coeffs[0] ) * sourceUbs[0];
+        double bUpper = coeffs[1] * sourceLbs[0] + ( 1 - coeffs[1] ) * sourceUbs[0];
 
         for ( unsigned j = 0; j < _inputLayerSize; ++j )
        {
-            if ( a_l >= 0 )
+            if ( aLower >= 0 )
             {
-                _symbolicLb[j * _size + i] += a_l * sourceSymbolicLb[j * sourceLayerSize + indexA];
+                _symbolicLb[j * _size + i] +=
+                    aLower * sourceSymbolicLb[j * sourceLayerSize + indexA];
             }
             else
             {
-                _symbolicLb[j * _size + i] += a_l * sourceSymbolicUb[j * sourceLayerSize + indexA];
+                _symbolicLb[j * _size + i] +=
+                    aLower * sourceSymbolicUb[j * sourceLayerSize + indexA];
             }
 
-            if ( a_u >= 0 )
+            if ( aUpper >= 0 )
             {
-                _symbolicUb[j * _size + i] += a_u * sourceSymbolicUb[j * sourceLayerSize + indexA];
+                _symbolicUb[j * _size + i] +=
+                    aUpper * sourceSymbolicUb[j * sourceLayerSize + indexA];
             }
             else
             {
-                _symbolicUb[j * _size + i] += a_u * sourceSymbolicLb[j * sourceLayerSize + indexA];
+                _symbolicUb[j * _size + i] +=
+                    aUpper * sourceSymbolicLb[j * sourceLayerSize + indexA];
             }
 
-            if ( b_l >= 0 )
+            if ( bLower >= 0 )
             {
-                _symbolicLb[j * _size + i] += b_l * sourceSymbolicLb[j * sourceLayerSize + indexB];
+                _symbolicLb[j * _size + i] +=
+                    bLower * sourceSymbolicLb[j * sourceLayerSize + indexB];
             }
             else
             {
-                _symbolicLb[j * _size + i] += b_l * sourceSymbolicUb[j * sourceLayerSize + indexB];
+                _symbolicLb[j * _size + i] +=
+                    bLower * sourceSymbolicUb[j * sourceLayerSize + indexB];
             }
 
-            if ( b_l >= 0 )
+            if ( bUpper >= 0 )
             {
-                _symbolicUb[j * _size + i] += b_u * sourceSymbolicUb[j * sourceLayerSize + indexB];
+                _symbolicUb[j * _size + i] +=
+                    bUpper * sourceSymbolicUb[j * sourceLayerSize + indexB];
             }
             else
             {
-                _symbolicUb[j * _size + i] += b_u * sourceSymbolicLb[j * sourceLayerSize + indexB];
+                _symbolicUb[j * _size + i] +=
+                    bUpper * sourceSymbolicLb[j * sourceLayerSize + indexB];
             }
         }
 
diff --git a/src/nlr/NLRError.h b/src/nlr/NLRError.h
index eb19fa05bd..69016ea8c1 100644
--- a/src/nlr/NLRError.h
+++ b/src/nlr/NLRError.h
@@ -27,7 +27,8 @@ class NLRError : public Error
         INPUT_LAYER_NOT_THE_FIRST_LAYER = 2,
         LEAKY_RELU_SLOPES_NOT_UNIFORM = 3,
         RELU_NOT_FOUND = 4,
-        LAYER_NOT_FOUND = 5
+        LAYER_NOT_FOUND = 5,
+        NEURON_NOT_FOUND = 6,
     };
 
     NLRError( NLRError::Code code )
diff --git a/src/nlr/NetworkLevelReasoner.cpp b/src/nlr/NetworkLevelReasoner.cpp
index bf4a3642f1..9b208d5e2c 100644
--- a/src/nlr/NetworkLevelReasoner.cpp
+++ b/src/nlr/NetworkLevelReasoner.cpp
@@ -202,15 +202,15 @@ void NetworkLevelReasoner::clearConstraintTightenings()
     _boundTightenings.clear();
 }
 
-void NetworkLevelReasoner::receivePolygonalTighterBound( PolygonalTightening polygonal_tightening )
+void NetworkLevelReasoner::receivePolygonalTighterBound( PolygonalTightening polygonalTightening )
 {
-    _polygonalBoundTightenings.append( polygonal_tightening );
+    _polygonalBoundTightenings.append( polygonalTightening );
 }
 
 void NetworkLevelReasoner::getConstraintPolygonalTightenings(
-    List &polygonal_tightenings )
+    List &polygonalTightenings )
 {
-    polygonal_tightenings = _polygonalBoundTightenings;
+    polygonalTightenings = _polygonalBoundTightenings;
     _polygonalBoundTightenings.clear();
 }
 
@@ -227,8 +227,7 @@ void NetworkLevelReasoner::symbolicBoundPropagation()
 
 void NetworkLevelReasoner::parameterisedSymbolicBoundPropagation( const Vector &coeffs )
 {
-    Map> layerIndicesToParameters =
-        getParametersForLayers( _layerIndexToLayer, coeffs );
+    Map> layerIndicesToParameters = getParametersForLayers( coeffs );
     for ( unsigned i = 0; i < _layerIndexToLayer.size(); ++i )
     {
         const Vector &currentLayerCoeffs = layerIndicesToParameters[i];
@@ -255,17 +254,16 @@ void NetworkLevelReasoner::lpRelaxationPropagation()
         lpFormulator.optimizeBoundsWithLpRelaxation( _layerIndexToLayer, true );
     else if ( Options::get()->getMILPSolverBoundTighteningType() ==
               MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX )
-        optimizeBoundsWithPreimageApproximation( _layerIndexToLayer, lpFormulator );
-    else if ( Options::get()->getMILPSolverBoundTighteningType() ==
-              MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP )
-        optimizeBoundsWithPMNR( _layerIndexToLayer, lpFormulator );
+        optimizeBoundsWithPreimageApproximation( lpFormulator );
     else if ( Options::get()->getMILPSolverBoundTighteningType() ==
+                  MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP ||
+              Options::get()->getMILPSolverBoundTighteningType() ==
                   MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM ||
              Options::get()->getMILPSolverBoundTighteningType() ==
                   MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT ||
              Options::get()->getMILPSolverBoundTighteningType() ==
                   MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS )
-        optimizeBoundsWithPMNR( _layerIndexToLayer, lpFormulator );
+        optimizeBoundsWithPMNR( lpFormulator );
     else if ( Options::get()->getMILPSolverBoundTighteningType() ==
               MILPSolverBoundTighteningType::LP_RELAXATION )
         lpFormulator.optimizeBoundsWithLpRelaxation( _layerIndexToLayer );
@@ -274,28 +272,28 @@ void NetworkLevelReasoner::lpRelaxationPropagation()
         lpFormulator.optimizeBoundsWithIncrementalLpRelaxation( _layerIndexToLayer );
 }
 
-void NetworkLevelReasoner::optimizeBoundsWithPreimageApproximation( Map &layers,
-                                                                    LPFormulator &lpFormulator )
+void
NetworkLevelReasoner::optimizeBoundsWithPreimageApproximation( LPFormulator &lpFormulator ) { - const Vector &optimal_coeffs = OptimalParameterisedSymbolicBoundTightening(); + const Vector &optimalCoeffs = OptimalParameterisedSymbolicBoundTightening(); Map> layerIndicesToParameters = - getParametersForLayers( layers, optimal_coeffs ); - lpFormulator.optimizeBoundsWithLpRelaxation( layers, false, layerIndicesToParameters ); - lpFormulator.optimizeBoundsWithLpRelaxation( layers, true, layerIndicesToParameters ); + getParametersForLayers( optimalCoeffs ); + lpFormulator.optimizeBoundsWithLpRelaxation( + _layerIndexToLayer, false, layerIndicesToParameters ); + lpFormulator.optimizeBoundsWithLpRelaxation( + _layerIndexToLayer, true, layerIndicesToParameters ); } -void NetworkLevelReasoner::optimizeBoundsWithPMNR( Map &layers, - LPFormulator &lpFormulator ) +void NetworkLevelReasoner::optimizeBoundsWithPMNR( LPFormulator &lpFormulator ) { - const Vector &polygonal_tightenings = + const Vector &polygonalTightenings = OptimizeParameterisedPolygonalTightening(); - const Vector &coeffs = Vector( {} ); - Map> layerIndicesToParameters = - getParametersForLayers( layers, coeffs ); + unsigned parameterCount = getNumberOfParameters(); + const Vector &coeffs = Vector( parameterCount, 0 ); + Map> layerIndicesToParameters = getParametersForLayers( coeffs ); lpFormulator.optimizeBoundsWithLpRelaxation( - layers, false, layerIndicesToParameters, polygonal_tightenings ); + _layerIndexToLayer, false, layerIndicesToParameters, polygonalTightenings ); lpFormulator.optimizeBoundsWithLpRelaxation( - layers, true, layerIndicesToParameters, polygonal_tightenings ); + _layerIndexToLayer, true, layerIndicesToParameters, polygonalTightenings ); } void NetworkLevelReasoner::LPTighteningForOneLayer( unsigned targetIndex ) @@ -353,8 +351,6 @@ void NetworkLevelReasoner::freeMemoryIfNeeded() for ( const auto &layer : _layerIndexToLayer ) delete layer.second; _layerIndexToLayer.clear(); - - freeMemoryForSymbolicBoundMapsIfNeeded(); } void NetworkLevelReasoner::storeIntoOther( NetworkLevelReasoner &other ) const @@ -741,7 +737,7 @@ double NetworkLevelReasoner::getPreviousBias( const ReluConstraint *reluConstrai return _previousBiases[reluConstraint]; } -const double *NetworkLevelReasoner::getOutputLayerSymbolicLb( unsigned layerIndex ) const +Vector NetworkLevelReasoner::getOutputLayerSymbolicLb( unsigned layerIndex ) const { // Initialize map if empty. if ( _outputLayerSymbolicLb.empty() ) @@ -751,13 +747,14 @@ const double *NetworkLevelReasoner::getOutputLayerSymbolicLb( unsigned layerInde if ( !_outputLayerSymbolicLb.exists( layerIndex ) ) { - throw NLRError( NLRError::LAYER_NOT_FOUND, "Layer not found in symbolic bounds map." ); + throw NLRError( NLRError::LAYER_NOT_FOUND, + "Layer not found in output layer symbolic bounds map." ); } return _outputLayerSymbolicLb[layerIndex]; } -const double *NetworkLevelReasoner::getOutputLayerSymbolicUb( unsigned layerIndex ) const +Vector NetworkLevelReasoner::getOutputLayerSymbolicUb( unsigned layerIndex ) const { // Initialize map if empty. if ( _outputLayerSymbolicUb.empty() ) @@ -767,13 +764,14 @@ const double *NetworkLevelReasoner::getOutputLayerSymbolicUb( unsigned layerInde if ( !_outputLayerSymbolicUb.exists( layerIndex ) ) { - throw NLRError( NLRError::LAYER_NOT_FOUND, "Layer not found in symbolic bounds map." ); + throw NLRError( NLRError::LAYER_NOT_FOUND, + "Layer not found in output layer symbolic bounds map." 
); } return _outputLayerSymbolicUb[layerIndex]; } -const double *NetworkLevelReasoner::getOutputLayerSymbolicLowerBias( unsigned layerIndex ) const +Vector NetworkLevelReasoner::getOutputLayerSymbolicLowerBias( unsigned layerIndex ) const { // Initialize map if empty. if ( _outputLayerSymbolicLowerBias.empty() ) @@ -783,13 +781,14 @@ const double *NetworkLevelReasoner::getOutputLayerSymbolicLowerBias( unsigned la if ( !_outputLayerSymbolicLowerBias.exists( layerIndex ) ) { - throw NLRError( NLRError::LAYER_NOT_FOUND, "Layer not found in symbolic bounds map." ); + throw NLRError( NLRError::LAYER_NOT_FOUND, + "Layer not found in output layer symbolic bounds map." ); } return _outputLayerSymbolicLowerBias[layerIndex]; } -const double *NetworkLevelReasoner::getOutputLayerSymbolicUpperBias( unsigned layerIndex ) const +Vector NetworkLevelReasoner::getOutputLayerSymbolicUpperBias( unsigned layerIndex ) const { // Initialize map if empty. if ( _outputLayerSymbolicUpperBias.empty() ) @@ -799,13 +798,14 @@ const double *NetworkLevelReasoner::getOutputLayerSymbolicUpperBias( unsigned la if ( !_outputLayerSymbolicUpperBias.exists( layerIndex ) ) { - throw NLRError( NLRError::LAYER_NOT_FOUND, "Layer not found in symbolic bounds map." ); + throw NLRError( NLRError::LAYER_NOT_FOUND, + "Layer not found in output layer symbolic bounds map." ); } return _outputLayerSymbolicUpperBias[layerIndex]; } -const double *NetworkLevelReasoner::getSymbolicLbInTermsOfPredecessor( unsigned layerIndex ) const +Vector NetworkLevelReasoner::getSymbolicLbInTermsOfPredecessor( unsigned layerIndex ) const { // Initialize map if empty. if ( _symbolicLbInTermsOfPredecessor.empty() ) @@ -815,13 +815,14 @@ const double *NetworkLevelReasoner::getSymbolicLbInTermsOfPredecessor( unsigned if ( !_symbolicLbInTermsOfPredecessor.exists( layerIndex ) ) { - throw NLRError( NLRError::LAYER_NOT_FOUND, "Layer not found in symbolic bounds map." ); + throw NLRError( NLRError::LAYER_NOT_FOUND, + "Layer not found in predecessor layer symbolic bounds map." ); } return _symbolicLbInTermsOfPredecessor[layerIndex]; } -const double *NetworkLevelReasoner::getSymbolicUbInTermsOfPredecessor( unsigned layerIndex ) const +Vector NetworkLevelReasoner::getSymbolicUbInTermsOfPredecessor( unsigned layerIndex ) const { // Initialize map if empty. if ( _symbolicUbInTermsOfPredecessor.empty() ) @@ -831,13 +832,14 @@ const double *NetworkLevelReasoner::getSymbolicUbInTermsOfPredecessor( unsigned if ( !_symbolicUbInTermsOfPredecessor.exists( layerIndex ) ) { - throw NLRError( NLRError::LAYER_NOT_FOUND, "Layer not found in symbolic bounds map." ); + throw NLRError( NLRError::LAYER_NOT_FOUND, + "Layer not found in predecessor layer symbolic bounds map." ); } return _symbolicUbInTermsOfPredecessor[layerIndex]; } -const double * +Vector NetworkLevelReasoner::getSymbolicLowerBiasInTermsOfPredecessor( unsigned layerIndex ) const { // Initialize map if empty. @@ -848,13 +850,14 @@ NetworkLevelReasoner::getSymbolicLowerBiasInTermsOfPredecessor( unsigned layerIn if ( !_symbolicLowerBiasInTermsOfPredecessor.exists( layerIndex ) ) { - throw NLRError( NLRError::LAYER_NOT_FOUND, "Layer not found in symbolic bounds map." ); + throw NLRError( NLRError::LAYER_NOT_FOUND, + "Layer not found in predecessor layer symbolic bounds map." ); } return _symbolicLowerBiasInTermsOfPredecessor[layerIndex]; } -const double * +Vector NetworkLevelReasoner::getSymbolicUpperBiasInTermsOfPredecessor( unsigned layerIndex ) const { // Initialize map if empty. 
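// --------------------------------------------------------------------
// Illustrative sketch, not part of the patch: the lazy-initialise-
// then-throw accessor pattern shared by the getters above and the BBPS
// getters below. The map is populated on first access (const_cast is
// needed because the getters are const), and a missing key is reported
// as an error rather than papered over with a default value. Generic
// stand-in types; all names are hypothetical.

#include <map>
#include <stdexcept>

template <typename K, typename V>
class LazyBoundsMap
{
public:
    const V &get( const K &key ) const
    {
        if ( _map.empty() )
            const_cast<LazyBoundsMap *>( this )->initialize();
        auto it = _map.find( key );
        if ( it == _map.end() )
            throw std::runtime_error( "Key not found in bounds map." );
        return it->second;
    }

private:
    std::map<K, V> _map;

    void initialize()
    {
        // Populate _map from the network, as the patch does in its
        // initializeSymbolicBoundsMaps() / initializeBBPSMaps().
    }
};
// --------------------------------------------------------------------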
@@ -865,12 +868,46 @@ NetworkLevelReasoner::getSymbolicUpperBiasInTermsOfPredecessor( unsigned layerIn if ( !_symbolicUpperBiasInTermsOfPredecessor.exists( layerIndex ) ) { - throw NLRError( NLRError::LAYER_NOT_FOUND, "Layer not found in symbolic bounds map." ); + throw NLRError( NLRError::LAYER_NOT_FOUND, + "Layer not found in predecessor layer symbolic bounds map." ); } return _symbolicUpperBiasInTermsOfPredecessor[layerIndex]; } +Map NetworkLevelReasoner::getBBPSBranchingPoint( NeuronIndex index ) const +{ + // Initialize map if empty. + if ( _neuronToBBPSBranchingPoints.empty() ) + { + const_cast( this )->initializeBBPSMaps(); + } + + if ( !_neuronToBBPSBranchingPoints.exists( index ) ) + { + throw NLRError( NLRError::NEURON_NOT_FOUND, + "Neuron not found in BBPS branching points map." ); + } + + return _neuronToBBPSBranchingPoints[index]; +} + +double NetworkLevelReasoner::getBBPSScore( NeuronIndex index ) const +{ + // Initialize map if empty. + if ( _neuronToBBPSScores.empty() ) + { + const_cast( this )->initializeBBPSMaps(); + } + + if ( !_neuronToBBPSScores.exists( index ) ) + { + throw NLRError( NLRError::NEURON_NOT_FOUND, "Neuron not found in BBPS scores map." ); + } + + return _neuronToBBPSScores[index]; +} + unsigned NetworkLevelReasoner::mergeConsecutiveWSLayers( const Map &lowerBounds, const Map &upperBounds, @@ -957,108 +994,109 @@ bool NetworkLevelReasoner::suitableForMerging( const Vector NetworkLevelReasoner::OptimalParameterisedSymbolicBoundTightening() { - const Map &layers = getLayerIndexToLayer(); - // Search over coeffs in [0, 1]^number_of_parameters with projected grdient descent. - unsigned max_iterations = + unsigned maxIterations = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS; - double step_size = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; + double stepSize = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; double epsilon = GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS; - double weight_decay = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_WEIGHT_DECAY; + double weightDecay = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_WEIGHT_DECAY; double lr = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE; - unsigned dimension = getNumberOfParameters( layers ); + unsigned dimension = getNumberOfParameters(); bool maximize = false; - int sign = ( maximize ? -1 : 1 ); - Vector lower_bounds( dimension, 0 ); - Vector upper_bounds( dimension, 1 ); + Vector lowerBounds( dimension ); + Vector upperBounds( dimension ); + for ( size_t j = 0; j < dimension; ++j ) + { + lowerBounds[j] = 0; + upperBounds[j] = 1; + } // Initialize initial guess uniformly. 
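// --------------------------------------------------------------------
// Illustrative sketch, not part of the patch: the projected-gradient
// loop that follows, reduced to its core. The gradient is a one-sided
// finite difference of the cost plus weight decay, and every step is
// projected back into the box [lb, ub]^d. `cost` stands in for
// EstimateVolume; names and default constants are hypothetical.

#include <algorithm>
#include <functional>
#include <vector>

std::vector<double> projectedGradientDescent(
    std::vector<double> guess, double lb, double ub,
    const std::function<double( const std::vector<double> & )> &cost,
    unsigned maxIterations = 100, double stepSize = 0.01,
    double weightDecay = 0.0, double lr = 0.1 )
{
    for ( unsigned iter = 0; iter < maxIterations; ++iter )
    {
        double base = cost( guess );
        std::vector<double> gradient( guess.size(), 0 );
        for ( size_t j = 0; j < guess.size(); ++j )
        {
            std::vector<double> candidate = guess;
            candidate[j] += stepSize;
            // An upward probe can only leave the box through ub; treat
            // out-of-box probes as zero gradient, as the code below does.
            if ( candidate[j] > ub )
                continue;
            gradient[j] =
                ( cost( candidate ) - base ) / stepSize + weightDecay * guess[j];
        }
        for ( size_t j = 0; j < guess.size(); ++j )
            guess[j] = std::clamp( guess[j] - lr * gradient[j], lb, ub );
    }
    return guess;
}
// --------------------------------------------------------------------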
Vector guess( dimension );
     std::mt19937_64 rng( GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED );
     std::uniform_real_distribution dis( 0, 1 );
-    for ( unsigned j = 0; j < dimension; ++j )
+    for ( size_t j = 0; j < dimension; ++j )
     {
-        double lb = lower_bounds[j];
-        double ub = upper_bounds[j];
+        double lb = lowerBounds[j];
+        double ub = upperBounds[j];
         guess[j] = lb + dis( rng ) * ( ub - lb );
     }
 
     Vector> candidates( dimension );
     Vector gradient( dimension );
-    for ( unsigned i = 0; i < max_iterations; ++i )
+    for ( size_t i = 0; i < maxIterations; ++i )
     {
-        double current_cost = EstimateVolume( layers, guess );
-        for ( unsigned j = 0; j < dimension; ++j )
+        double currentCost = EstimateVolume( guess );
+        for ( size_t j = 0; j < dimension; ++j )
         {
             candidates[j] = Vector( guess );
-            candidates[j][j] += step_size;
+            candidates[j][j] += stepSize;
 
-            if ( candidates[j][j] > upper_bounds[j] || candidates[j][j] < lower_bounds[j] )
+            if ( candidates[j][j] > upperBounds[j] || candidates[j][j] < lowerBounds[j] )
             {
                 gradient[j] = 0;
                 continue;
             }
-            double cost = EstimateVolume( layers, candidates[j] );
-            gradient[j] = sign * ( cost - current_cost ) / step_size + weight_decay * guess[j];
+            int sign = ( maximize == false ? 1 : -1 );
+            double cost = EstimateVolume( candidates[j] );
+            gradient[j] = sign * ( cost - currentCost ) / stepSize + weightDecay * guess[j];
         }
 
-        bool gradient_is_zero = true;
-        for ( unsigned j = 0; j < dimension; ++j )
+        bool gradientIsZero = true;
+        for ( size_t j = 0; j < dimension; ++j )
         {
             if ( FloatUtils::abs( gradient[j] ) > epsilon )
             {
-                gradient_is_zero = false;
+                gradientIsZero = false;
             }
         }
-        if ( gradient_is_zero )
+        if ( gradientIsZero )
         {
             break;
         }
 
-        for ( unsigned j = 0; j < dimension; ++j )
+        for ( size_t j = 0; j < dimension; ++j )
         {
             guess[j] -= lr * gradient[j];
 
-            if ( guess[j] > upper_bounds[j] )
+            if ( guess[j] > upperBounds[j] )
             {
-                guess[j] = upper_bounds[j];
+                guess[j] = upperBounds[j];
             }
 
-            if ( guess[j] < lower_bounds[j] )
+            if ( guess[j] < lowerBounds[j] )
             {
-                guess[j] = lower_bounds[j];
+                guess[j] = lowerBounds[j];
             }
         }
     }
-    const Vector optimal_coeffs( guess );
-    return optimal_coeffs;
+    const Vector optimalCoeffs( guess );
+    return optimalCoeffs;
 }
 
-double NetworkLevelReasoner::EstimateVolume( const Map &layers,
-                                             const Vector &coeffs )
+double NetworkLevelReasoner::EstimateVolume( const Vector &coeffs )
 {
     // First, run parameterised symbolic bound propagation.
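// --------------------------------------------------------------------
// Illustrative sketch, not part of the patch: the Monte-Carlo estimate
// that EstimateVolume computes. Each uniform sample from the input box
// contributes sigmoid(maxMargin), a smooth proxy for "the point lies
// outside some output bound"; the average, scaled by the box volume,
// approximates the target volume. Working in log space avoids overflow
// for high-dimensional boxes. `maxMargin` stands in for the maximum of
// calculateDifferenceFromSymbolic over output neurons; hypothetical
// names throughout.

#include <cmath>
#include <functional>
#include <random>
#include <vector>

double estimateVolume(
    const std::vector<std::pair<double, double>> &inputBox,
    const std::function<double( const std::vector<double> & )> &maxMargin,
    unsigned samples = 1000, unsigned seed = 42 )
{
    std::mt19937_64 rng( seed );
    std::uniform_real_distribution<double> dis( 0, 1 );

    double logBoxVolume = 0;
    for ( const auto &[lb, ub] : inputBox )
        if ( ub > lb )
            logBoxVolume += std::log( ub - lb );

    double sigmoidSum = 0;
    std::vector<double> point( inputBox.size() );
    for ( unsigned i = 0; i < samples; ++i )
    {
        for ( size_t j = 0; j < inputBox.size(); ++j )
            point[j] = inputBox[j].first +
                       dis( rng ) * ( inputBox[j].second - inputBox[j].first );
        sigmoidSum += 1.0 / ( 1.0 + std::exp( -maxMargin( point ) ) );
    }
    if ( sigmoidSum == 0 )
        return 0;
    return std::exp( logBoxVolume + std::log( sigmoidSum ) ) / samples;
}
// --------------------------------------------------------------------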
- Map> layerIndicesToParameters = - getParametersForLayers( layers, coeffs ); - for ( unsigned i = 0; i < layers.size(); ++i ) + Map> layerIndicesToParameters = getParametersForLayers( coeffs ); + for ( unsigned i = 0; i < _layerIndexToLayer.size(); ++i ) { - ASSERT( layers.exists( i ) ); + ASSERT( _layerIndexToLayer.exists( i ) ); const Vector ¤tLayerCoeffs = layerIndicesToParameters[i]; - layers[i]->computeParameterisedSymbolicBounds( currentLayerCoeffs ); + _layerIndexToLayer[i]->computeParameterisedSymbolicBounds( currentLayerCoeffs ); } std::mt19937_64 rng( GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED ); - double log_box_volume = 0; - double sigmoid_sum = 0; + double logBoxVolume = 0; + double sigmoidSum = 0; unsigned inputLayerIndex = 0; - unsigned outputLayerIndex = layers.size() - 1; - Layer *inputLayer = layers[inputLayerIndex]; - Layer *outputLayer = layers[outputLayerIndex]; + unsigned outputLayerIndex = _layerIndexToLayer.size() - 1; + Layer *inputLayer = _layerIndexToLayer[inputLayerIndex]; + Layer *outputLayer = _layerIndexToLayer[outputLayerIndex]; // Calculate volume of input variables' bounding box. for ( unsigned index = 0; index < inputLayer->getSize(); ++index ) @@ -1072,7 +1110,7 @@ double NetworkLevelReasoner::EstimateVolume( const Map &layer if ( lb == ub ) continue; - log_box_volume += std::log( ub - lb ); + logBoxVolume += std::log( ub - lb ); } for ( unsigned i = 0; i < GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; ++i ) @@ -1095,19 +1133,19 @@ double NetworkLevelReasoner::EstimateVolume( const Map &layer } // Calculate sigmoid of maximum margin from output symbolic bounds. - double max_margin = 0; + double maxMargin = 0; for ( unsigned j = 0; j < outputLayer->getSize(); ++j ) { if ( outputLayer->neuronEliminated( j ) ) continue; double margin = calculateDifferenceFromSymbolic( outputLayer, point, j ); - max_margin = std::max( max_margin, margin ); + maxMargin = std::max( maxMargin, margin ); } - sigmoid_sum += SigmoidConstraint::sigmoid( max_margin ); + sigmoidSum += SigmoidConstraint::sigmoid( maxMargin ); } - return std::exp( log_box_volume + std::log( sigmoid_sum ) ) / + return std::exp( logBoxVolume + std::log( sigmoidSum ) ) / GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; } @@ -1131,16 +1169,16 @@ double NetworkLevelReasoner::calculateDifferenceFromSymbolic( const Layer *layer const Vector NetworkLevelReasoner::OptimizeParameterisedPolygonalTightening() { - const Map &layers = getLayerIndexToLayer(); - const Vector &selectedTightenings = generatePolygonalTightenings( layers ); + computeSuccessorLayers(); + const Vector &selectedTightenings = generatePolygonalTightenings(); const unsigned size = selectedTightenings.size(); Vector optimizedTightenings = Vector( {} ); for ( unsigned i = 0; i < size; ++i ) { PolygonalTightening tightening = selectedTightenings[i]; - double lower_bound = OptimizeSingleParameterisedPolygonalTightening( - layers, tightening, optimizedTightenings ); - tightening._value = lower_bound; + double lowerBound = + OptimizeSingleParameterisedPolygonalTightening( tightening, optimizedTightenings ); + tightening._value = lowerBound; optimizedTightenings.append( tightening ); } const Vector tightenings( optimizedTightenings ); @@ -1148,159 +1186,147 @@ const Vector NetworkLevelReasoner::OptimizeParameterisedPol } double NetworkLevelReasoner::OptimizeSingleParameterisedPolygonalTightening( - const Map &layers, PolygonalTightening &tightening, Vector &prevTightenings ) { - // Search over coeffs in [0, 1]^number_of_parameters, 
gamma in - // [0, inf)^size_of_prevTightenings with PGD. - unsigned max_iterations = GlobalConfiguration::INVPROP_MAX_ITERATIONS; - double step_size = GlobalConfiguration::INVPROP_STEP_SIZE; + // Search over coeffs in [0, 1]^numberOfParameters, gamma in + // [0, inf)^sizeOfPrevTightenings with PGD. + // unsigned maxIterations = GlobalConfiguration::INVPROP_MAX_ITERATIONS; + unsigned maxIterations = 1000; + // double coeffsStepSize = GlobalConfiguration::INVPROP_STEP_SIZE; + // double gammaStepSize = GlobalConfiguration::INVPROP_STEP_SIZE; + double coeffsStepSize = 0.025; + double gammaStepSize = 0.0025; double epsilon = GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS; - double weight_decay = GlobalConfiguration::INVPROP_WEIGHT_DECAY; + double weightDecay = GlobalConfiguration::INVPROP_WEIGHT_DECAY; double lr = GlobalConfiguration::INVPROP_LEARNING_RATE; - unsigned coeffs_dimension = getNumberOfParameters( layers ); - unsigned gamma_dimension = prevTightenings.size(); + unsigned coeffsDimension = getNumberOfParameters(); + unsigned gammaDimension = prevTightenings.size(); bool maximize = ( tightening._type == PolygonalTightening::LB ); - double best_bound = 0; - int sign = ( maximize ? -1 : 1 ); + int sign = ( maximize ? 1 : -1 ); + double bestBound = sign * tightening._value; - Vector coeffs_lower_bounds( coeffs_dimension, 0 ); - Vector coeffs_upper_bounds( coeffs_dimension, 1 ); - Vector gamma_lower_bounds( gamma_dimension, 0 ); + Vector coeffsLowerBounds( coeffsDimension, 0 ); + Vector coeffsUpperBounds( coeffsDimension, 1 ); + Vector gammaLowerBounds( gammaDimension, 0 ); - Vector coeffs( coeffs_dimension, GlobalConfiguration::INVPROP_INITIAL_ALPHA ); - Vector gamma( gamma_dimension, GlobalConfiguration::INVPROP_INITIAL_GAMMA ); + Vector coeffs( coeffsDimension, GlobalConfiguration::INVPROP_INITIAL_ALPHA ); + Vector gamma( gammaDimension, GlobalConfiguration::INVPROP_INITIAL_GAMMA ); - Vector> coeffs_candidates( coeffs_dimension ); - Vector coeffs_gradient( coeffs_dimension ); - Vector> gamma_candidates( gamma_dimension ); - Vector gamma_gradient( gamma_dimension ); + Vector> coeffsCandidates( coeffsDimension ); + Vector coeffsGradient( coeffsDimension ); + Vector> gammaCandidates( gammaDimension ); + Vector gammaGradient( gammaDimension ); - for ( unsigned i = 0; i < max_iterations; ++i ) + for ( unsigned i = 0; i < maxIterations; ++i ) { double cost = getParameterisdPolygonalTighteningLowerBound( - layers, coeffs, gamma, tightening, prevTightenings ); - for ( unsigned j = 0; j < coeffs_dimension; ++j ) + coeffs, gamma, tightening, prevTightenings ); + for ( unsigned j = 0; j < coeffsDimension; ++j ) { - coeffs_candidates[j] = Vector( coeffs ); - coeffs_candidates[j][j] += step_size; + coeffsCandidates[j] = Vector( coeffs ); + coeffsCandidates[j][j] += coeffsStepSize; - if ( coeffs_candidates[j][j] > coeffs_upper_bounds[j] || - coeffs_candidates[j][j] < coeffs_lower_bounds[j] ) + if ( coeffs[j] <= coeffsLowerBounds[j] || coeffs[j] >= coeffsUpperBounds[j] || + coeffsCandidates[j][j] > coeffsUpperBounds[j] || + coeffsCandidates[j][j] < coeffsLowerBounds[j] ) { - coeffs_gradient[j] = 0; + coeffsGradient[j] = 0; continue; } - double current_cost = getParameterisdPolygonalTighteningLowerBound( - layers, coeffs_candidates[j], gamma, tightening, prevTightenings ); - coeffs_gradient[j] = - sign * ( cost - current_cost ) / step_size + weight_decay * coeffs[j]; - best_bound = - ( maximize ? 
std::max( cost, current_cost ) : std::min( cost, current_cost ) ); + double currentCost = getParameterisdPolygonalTighteningLowerBound( + coeffsCandidates[j], gamma, tightening, prevTightenings ); + coeffsGradient[j] = ( currentCost - cost ) / coeffsStepSize + weightDecay * coeffs[j]; + bestBound = ( maximize ? std::max( bestBound, currentCost ) + : std::min( bestBound, currentCost ) ); } - for ( unsigned j = 0; j < gamma_dimension; ++j ) + for ( unsigned j = 0; j < gammaDimension; ++j ) { - gamma_candidates[j] = Vector( gamma ); - gamma_candidates[j][j] += step_size; + gammaCandidates[j] = Vector( gamma ); + gammaCandidates[j][j] += gammaStepSize; - if ( gamma_candidates[j][j] < gamma_lower_bounds[j] ) + if ( gamma[j] <= gammaLowerBounds[j] || gammaCandidates[j][j] < gammaLowerBounds[j] ) { - gamma_gradient[j] = 0; + gammaGradient[j] = 0; continue; } - double current_cost = getParameterisdPolygonalTighteningLowerBound( - layers, coeffs, gamma_candidates[j], tightening, prevTightenings ); - gamma_gradient[j] = - sign * ( cost - current_cost ) / step_size + weight_decay * gamma[j]; - best_bound = - ( maximize ? std::max( cost, current_cost ) : std::min( cost, current_cost ) ); + double currentCost = getParameterisdPolygonalTighteningLowerBound( + coeffs, gammaCandidates[j], tightening, prevTightenings ); + gammaGradient[j] = ( currentCost - cost ) / gammaStepSize + weightDecay * gamma[j]; + bestBound = ( maximize ? std::max( bestBound, currentCost ) + : std::min( bestBound, currentCost ) ); } - bool gradient_is_zero = true; - for ( unsigned j = 0; j < coeffs_dimension; ++j ) + bool gradientIsZero = true; + for ( unsigned j = 0; j < coeffsDimension; ++j ) { - if ( FloatUtils::abs( coeffs_gradient[j] ) > epsilon ) + if ( FloatUtils::abs( coeffsGradient[j] ) > epsilon ) { - gradient_is_zero = false; + gradientIsZero = false; } } - for ( unsigned j = 0; j < gamma_dimension; ++j ) + for ( unsigned j = 0; j < gammaDimension; ++j ) { - if ( FloatUtils::abs( gamma_gradient[j] ) > epsilon ) + if ( FloatUtils::abs( gammaGradient[j] ) > epsilon ) { - gradient_is_zero = false; + gradientIsZero = false; } } - if ( gradient_is_zero ) + if ( gradientIsZero ) { break; } - for ( unsigned j = 0; j < coeffs_dimension; ++j ) + for ( unsigned j = 0; j < coeffsDimension; ++j ) { - coeffs[j] -= lr * coeffs_gradient[j]; - gamma[j] -= lr * gamma_gradient[j]; - - if ( coeffs[j] > coeffs_upper_bounds[j] ) - { - coeffs[j] = coeffs_upper_bounds[j]; - } - - if ( coeffs[j] < coeffs_lower_bounds[j] ) - { - coeffs[j] = coeffs_lower_bounds[j]; - } - - if ( gamma[j] < gamma_lower_bounds[j] ) - { - gamma[j] = gamma_lower_bounds[j]; - } + coeffs[j] += sign * lr * coeffsGradient[j]; + coeffs[j] = std::min( coeffs[j], coeffsUpperBounds[j] ); + coeffs[j] = std::max( coeffs[j], coeffsLowerBounds[j] ); + } + for ( unsigned j = 0; j < gammaDimension; ++j ) + { + gamma[j] += sign * lr * gammaGradient[j]; + gamma[j] = std::max( gamma[j], gammaLowerBounds[j] ); } } - return best_bound; + return bestBound; } double NetworkLevelReasoner::getParameterisdPolygonalTighteningLowerBound( - const Map &layers, const Vector &coeffs, const Vector &gamma, PolygonalTightening &tightening, Vector &prevTightenings ) { // First, run parameterised symbolic bound propagation and compue successor layers. 
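// --------------------------------------------------------------------
// Illustrative sketch, not part of the patch: the backward recursion
// performed below for a weighted-sum successor. The multiplier muHat
// of each neuron accumulates the successor multipliers mu pushed back
// through the successor's weight matrix; activation successors instead
// use their stored symbolic bounds, choosing the upper or lower
// relaxation by the sign of mu. Plain containers; hypothetical names.

#include <vector>

// muHat[i] = sum_j mu[j] * weights[i][j] for one weighted-sum successor.
std::vector<double> pushBackThroughWeightedSum(
    const std::vector<double> &mu,                    // successor multipliers
    const std::vector<std::vector<double>> &weights ) // weights[i][j]
{
    std::vector<double> muHat( weights.size(), 0 );
    for ( size_t i = 0; i < weights.size(); ++i )
        for ( size_t j = 0; j < mu.size(); ++j )
            muHat[i] += mu[j] * weights[i][j];
    return muHat;
}
// --------------------------------------------------------------------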
- Map> layerIndicesToParameters = - getParametersForLayers( _layerIndexToLayer, coeffs ); + Map> layerIndicesToParameters = getParametersForLayers( coeffs ); for ( unsigned i = 0; i < _layerIndexToLayer.size(); ++i ) { const Vector ¤tLayerCoeffs = layerIndicesToParameters[i]; _layerIndexToLayer[i]->computeParameterisedSymbolicBounds( currentLayerCoeffs ); } - computeSuccessorLayers(); - // Recursively compute mu, mu_hat for every layer. - const unsigned numLayers = layers.size(); - const unsigned maxLayer = layers.size() - 1; - const unsigned numCoeffs = coeffs.size(); - const unsigned numGamma = gamma.size(); - const unsigned inputLayerSize = layers[0]->getSize(); + // Recursively compute mu, muHat for every layer. + const unsigned numLayers = _layerIndexToLayer.size(); + const unsigned maxLayer = _layerIndexToLayer.size() - 1; + const unsigned prevTigheningsCount = prevTightenings.size(); + const unsigned inputLayerSize = _layerIndexToLayer[0]->getSize(); Vector> mu( numLayers ); - Vector> mu_hat( numLayers ); - Set handledInputNeurons; + Vector> muHat( numLayers ); - for ( unsigned index = maxLayer; index > 0; --index ) + for ( int index = maxLayer; index >= 0; --index ) { - Layer *layer = layers[index]; + Layer *layer = _layerIndexToLayer[index]; const unsigned layerSize = layer->getSize(); const unsigned layerIndex = layer->getLayerIndex(); - mu[index] = Vector( layerSize, 0 ); - mu_hat[index] = Vector( layerSize, 0 ); + mu[layerIndex] = Vector( layerSize, 0 ); + muHat[layerIndex] = Vector( layerSize, 0 ); if ( layerIndex < maxLayer ) { @@ -1315,23 +1341,21 @@ double NetworkLevelReasoner::getParameterisdPolygonalTighteningLowerBound( if ( successorLayerType == Layer::WEIGHTED_SUM ) { - const double *successorWeights = successorLayer->getWeights( layerIndex ); + const double *successorWeights = + successorLayer->getWeightMatrix( layerIndex ); for ( unsigned j = 0; j < successorLayerSize; ++j ) { if ( !successorLayer->neuronEliminated( j ) ) { - mu_hat[index][i] += mu[successorLayerIndex][j] * - successorWeights[i * successorLayerSize + j]; + muHat[layerIndex][i] += + mu[successorLayerIndex][j] * + successorWeights[i * successorLayerSize + j]; } } } else { - const double *successorSymbolicLbInTermsOfPredecessor = - _symbolicLbInTermsOfPredecessor[successorLayerIndex]; - const double *successorSymbolicUbInTermsOfPredecessor = - _symbolicUbInTermsOfPredecessor[successorLayerIndex]; for ( unsigned j = 0; j < successorLayerSize; ++j ) { // Find the index of the current neuron in the activation source list. @@ -1359,17 +1383,17 @@ double NetworkLevelReasoner::getParameterisdPolygonalTighteningLowerBound( { if ( mu[successorLayerIndex][j] >= 0 ) { - mu_hat[index][i] += + muHat[layerIndex][i] += mu[successorLayerIndex][j] * - successorSymbolicUbInTermsOfPredecessor - [j * successorLayerSize + predecessorIndex]; + getSymbolicUbInTermsOfPredecessor( successorLayerIndex ) + [successorLayerSize * predecessorIndex + j]; } else { - mu_hat[index][i] -= + muHat[layerIndex][i] -= mu[successorLayerIndex][j] * - successorSymbolicLbInTermsOfPredecessor - [j * successorLayerSize + predecessorIndex]; + getSymbolicLbInTermsOfPredecessor( successorLayerIndex ) + [successorLayerSize * predecessorIndex + j]; } } } @@ -1379,22 +1403,25 @@ double NetworkLevelReasoner::getParameterisdPolygonalTighteningLowerBound( } } - // Compute mu from mu_hat. 
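// --------------------------------------------------------------------
// Illustrative sketch, not part of the patch: how mu is assembled from
// muHat in the hunk below. A hidden neuron's multiplier starts from
// the back-substituted muHat, subtracts the coefficient the current
// tightening assigns to that neuron, and folds in previously proven
// tightenings weighted by their multipliers gamma, with the sign
// depending on whether each earlier bound was an LB or a UB.
// Simplified, hypothetical types.

#include <vector>

struct PrevTightening
{
    double coeff;  // coefficient on this neuron
    double gamma;  // Lagrange multiplier, gamma >= 0
    bool isLower;  // true for an LB tightening, false for a UB
};

double computeMu( double muHat, double tighteningCoeff,
                  const std::vector<PrevTightening> &prev )
{
    double mu = muHat - tighteningCoeff;
    for ( const auto &t : prev )
        mu += t.isLower ? -t.gamma * t.coeff : t.gamma * t.coeff;
    return mu;
}
// --------------------------------------------------------------------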
- mu[index] += mu_hat[index]; - for ( unsigned i = 0; i < layerSize; ++i ) + if ( layerIndex > 0 ) { - mu_hat[index][i] -= tightening.getCoeff( NeuronIndex( index, i ) ); - for ( unsigned j = 0; j < numCoeffs; ++j ) + // Compute mu from muHat. + mu[layerIndex] += muHat[layerIndex]; + for ( unsigned i = 0; i < layerSize; ++i ) { - const PolygonalTightening pt = prevTightenings[j]; - double prevCoeff = pt.getCoeff( NeuronIndex( index, i ) ); - if ( pt._type == PolygonalTightening::UB ) - { - mu_hat[index][i] -= gamma[j] * prevCoeff; - } - else + mu[layerIndex][i] -= tightening.getCoeff( NeuronIndex( layerIndex, i ) ); + for ( unsigned j = 0; j < prevTigheningsCount; ++j ) { - mu_hat[index][i] += gamma[j] * prevCoeff; + const PolygonalTightening pt = prevTightenings[j]; + double prevCoeff = pt.getCoeff( NeuronIndex( layerIndex, i ) ); + if ( pt._type == PolygonalTightening::LB ) + { + mu[layerIndex][i] -= gamma[j] * prevCoeff; + } + else + { + mu[layerIndex][i] += gamma[j] * prevCoeff; + } } } } @@ -1404,12 +1431,12 @@ double NetworkLevelReasoner::getParameterisdPolygonalTighteningLowerBound( Vector inputLayerBound( inputLayerSize, 0 ); for ( unsigned i = 0; i < inputLayerSize; ++i ) { - inputLayerBound[i] += tightening.getCoeff( NeuronIndex( 0, i ) ) - mu_hat[0][i]; - for ( unsigned j = 0; j < numCoeffs; ++j ) + inputLayerBound[i] += tightening.getCoeff( NeuronIndex( 0, i ) ) - muHat[0][i]; + for ( unsigned j = 0; j < prevTigheningsCount; ++j ) { const PolygonalTightening pt = prevTightenings[j]; double prevCoeff = pt.getCoeff( NeuronIndex( 0, i ) ); - if ( pt._type == PolygonalTightening::UB ) + if ( pt._type == PolygonalTightening::LB ) { inputLayerBound[i] += gamma[j] * prevCoeff; } @@ -1422,23 +1449,25 @@ double NetworkLevelReasoner::getParameterisdPolygonalTighteningLowerBound( // Compute lower bound for polygonal tightening bias using mu and inputLayerBound. 
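// --------------------------------------------------------------------
// Illustrative sketch, not part of the patch: the final step below
// bounds the residual linear form over the input box. For a term
// c * x with x in [l, u], the lower-bound worst case is c * l when
// c >= 0 and c * u otherwise; summing over the inputs yields the
// concrete bound on the tightening's bias. Hypothetical names.

#include <vector>

double lowerBoundOverBox( const std::vector<double> &coeffs,
                          const std::vector<double> &lbs,
                          const std::vector<double> &ubs )
{
    double bound = 0;
    for ( size_t i = 0; i < coeffs.size(); ++i )
        bound += coeffs[i] >= 0 ? coeffs[i] * lbs[i] : coeffs[i] * ubs[i];
    return bound;
}
// --------------------------------------------------------------------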
double lowerBound = 0; - for ( unsigned i = 0; i < numGamma; ++i ) + for ( unsigned i = 0; i < prevTigheningsCount; ++i ) { const PolygonalTightening pt = prevTightenings[i]; - if ( pt._type == PolygonalTightening::UB ) + if ( pt._type == PolygonalTightening::LB ) { - lowerBound += gamma[i] * pt._value; + lowerBound -= gamma[i] * pt._value; } else { - lowerBound -= gamma[i] * pt._value; + lowerBound += gamma[i] * pt._value; } } - for ( unsigned index = maxLayer; index > 0; --index ) + + for ( int index = maxLayer; index > 0; --index ) { - Layer *layer = layers[index]; + Layer *layer = _layerIndexToLayer[index]; const unsigned layerSize = layer->getSize(); + const unsigned layerIndex = layer->getLayerIndex(); if ( layer->getLayerType() == Layer::WEIGHTED_SUM ) { @@ -1447,11 +1476,11 @@ double NetworkLevelReasoner::getParameterisdPolygonalTighteningLowerBound( { if ( !layer->neuronEliminated( i ) ) { - lowerBound -= mu[index][i] * biases[i]; + lowerBound -= mu[layerIndex][i] * biases[i]; } else { - lowerBound -= mu[index][i] * layer->getEliminatedNeuronValue( i ); + lowerBound -= mu[layerIndex][i] * layer->getEliminatedNeuronValue( i ); } } } @@ -1461,27 +1490,28 @@ double NetworkLevelReasoner::getParameterisdPolygonalTighteningLowerBound( { if ( !layer->neuronEliminated( i ) ) { - if ( mu[index][i] > 0 ) + if ( mu[layerIndex][i] > 0 ) { - lowerBound -= - mu[index][i] * _symbolicUpperBiasInTermsOfPredecessor[index][i]; + lowerBound -= mu[layerIndex][i] * + getSymbolicUpperBiasInTermsOfPredecessor( layerIndex )[i]; } else { - lowerBound += - mu[index][i] * _symbolicLowerBiasInTermsOfPredecessor[index][i]; + lowerBound += mu[layerIndex][i] * + getSymbolicLowerBiasInTermsOfPredecessor( layerIndex )[i]; } } else { lowerBound -= - FloatUtils::abs( mu[index][i] ) * layer->getEliminatedNeuronValue( i ); + FloatUtils::abs( mu[layerIndex][i] ) * layer->getEliminatedNeuronValue( i ); } } } } - Layer *inputLayer = layers[0]; + + Layer *inputLayer = _layerIndexToLayer[0]; const double *inputLbs = inputLayer->getLbs(); const double *inputUbs = inputLayer->getUbs(); for ( unsigned i = 0; i < inputLayerSize; ++i ) @@ -1498,10 +1528,8 @@ double NetworkLevelReasoner::getParameterisdPolygonalTighteningLowerBound( return lowerBound; } -const Vector -NetworkLevelReasoner::generatePolygonalTightenings( const Map &layers ) +const Vector NetworkLevelReasoner::generatePolygonalTightenings() { - Vector tightenings = Vector( {} ); if ( Options::get()->getMILPSolverBoundTighteningType() == MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM || Options::get()->getMILPSolverBoundTighteningType() == @@ -1509,93 +1537,105 @@ NetworkLevelReasoner::generatePolygonalTightenings( const Map Options::get()->getMILPSolverBoundTighteningType() == MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS ) { - unsigned neuronCount = GlobalConfiguration::PMNR_SELECTED_NEURONS; - Vector lowerBoundTightenings = Vector( {} ); - Vector upperBoundTightenings = Vector( {} ); - const List constraints = selectConstraints( layers ); - for ( const auto &pair : constraints ) - { - unsigned layerIndex = pair._layer; - unsigned neuron = pair._neuron; - Layer *layer = layers[layerIndex]; - unsigned layerSize = layer->getSize(); + return generatePolygonalTighteningsForPMNR(); + } + else if ( Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP ) + { + return generatePolygonalTighteningsForInvprop(); + } - Map neuronToLowerCoefficient = Map( {} ); - Map neuronToUpperCoefficient 
= Map<NeuronIndex, double>( {} );
-            neuronToLowerCoefficient[pair] = -1;
-            neuronToUpperCoefficient[pair] = -1;
+    const Vector<PolygonalTightening> tighteningsVector = Vector<PolygonalTightening>( {} );
+    return tighteningsVector;
+}

-            List<NeuronIndex> sources = layer->getActivationSources( neuron );
-            unsigned predecessorIndex = 0;
-            for ( const auto &sourceIndex : sources )
-            {
-                neuronToLowerCoefficient[sourceIndex] =
-                    _symbolicLbInTermsOfPredecessor[layerIndex]
-                                                   [neuron * layerSize + predecessorIndex];
-                neuronToUpperCoefficient[sourceIndex] =
-                    _symbolicLbInTermsOfPredecessor[layerIndex]
-                                                   [neuron * layerSize + predecessorIndex];
-                ++predecessorIndex;
-            }
-            PolygonalTightening lowerTightening(
-                neuronToLowerCoefficient, 0, PolygonalTightening::LB );
-            PolygonalTightening upperTightening(
-                neuronToLowerCoefficient, 0, PolygonalTightening::UB );
-            lowerBoundTightenings.append( lowerTightening );
-            upperBoundTightenings.append( upperTightening );
-        }
+const Vector<PolygonalTightening> NetworkLevelReasoner::generatePolygonalTighteningsForPMNR()
+{
+    Vector<PolygonalTightening> tightenings = Vector<PolygonalTightening>( {} );
+    unsigned neuronCount = GlobalConfiguration::PMNR_SELECTED_NEURONS;
+    Vector<PolygonalTightening> lowerBoundTightenings = Vector<PolygonalTightening>( {} );
+    Vector<PolygonalTightening> upperBoundTightenings = Vector<PolygonalTightening>( {} );
+    const Vector<NeuronIndex> constraints = selectConstraints();
+
+    for ( const auto &pair : constraints )
+    {
+        unsigned layerIndex = pair._layer;
+        unsigned neuron = pair._neuron;
+        Layer *layer = _layerIndexToLayer[layerIndex];
+        unsigned layerSize = layer->getSize();
+
+        Map<NeuronIndex, double> neuronToLowerCoefficient = Map<NeuronIndex, double>( {} );
+        Map<NeuronIndex, double> neuronToUpperCoefficient = Map<NeuronIndex, double>( {} );
+        neuronToLowerCoefficient[pair] = -1;
+        neuronToUpperCoefficient[pair] = -1;
+
+        List<NeuronIndex> sources = layer->getActivationSources( neuron );
+        unsigned predecessorIndex = 0;
+        for ( const auto &sourceIndex : sources )
+        {
+            neuronToLowerCoefficient[sourceIndex] = getSymbolicLbInTermsOfPredecessor(
+                layerIndex )[predecessorIndex * layerSize + neuron];
+            neuronToUpperCoefficient[sourceIndex] = getSymbolicUbInTermsOfPredecessor(
+                layerIndex )[predecessorIndex * layerSize + neuron];
+            ++predecessorIndex;
+        }
+        PolygonalTightening lowerTightening( neuronToLowerCoefficient, 0, PolygonalTightening::UB );
+        PolygonalTightening upperTightening( neuronToUpperCoefficient, 0, PolygonalTightening::LB );
+        lowerBoundTightenings.append( lowerTightening );
+        upperBoundTightenings.append( upperTightening );
+    }

-        int range = ( 1 << neuronCount ) - 1;
-        for ( int i = 0; i < range; ++i )
-        {
-            Map<NeuronIndex, double> neuronToLowerCoefficient = Map<NeuronIndex, double>( {} );
-            Map<NeuronIndex, double> neuronToUpperCoefficient = Map<NeuronIndex, double>( {} );
-            for ( unsigned j = 0; j < neuronCount; ++j )
-            {
-                int flag = ( i >> j ) % 2;
-                if ( flag > 0 )
-                {
-                    for ( const auto &pair : lowerBoundTightenings[j]._neuronToCoefficient )
-                    {
-                        neuronToLowerCoefficient[pair.first] = pair.second;
-                    }
-                    for ( const auto &pair : upperBoundTightenings[j]._neuronToCoefficient )
-                    {
-                        neuronToUpperCoefficient[pair.first] = pair.second;
-                    }
-                }
-            }
-            PolygonalTightening lowerTightening(
-                neuronToLowerCoefficient, 0, PolygonalTightening::LB );
-            PolygonalTightening upperTightening(
-                neuronToLowerCoefficient, 0, PolygonalTightening::UB );
-            tightenings.append( lowerTightening );
-            tightenings.append( upperTightening );
-        }
+    int range = ( 1 << neuronCount ) - 1;
+    for ( int i = 0; i < range; ++i )
+    {
+        Map<NeuronIndex, double> neuronToLowerCoefficient = Map<NeuronIndex, double>( {} );
+        Map<NeuronIndex, double> neuronToUpperCoefficient = Map<NeuronIndex, double>( {} );
+        for ( unsigned j = 0; j < neuronCount; ++j )
+        {
+            int flag = ( i >> j ) % 2;
+            if ( flag > 0 )
+            {
+                for ( const auto &pair : lowerBoundTightenings[j]._neuronToCoefficient )
+                {
+                    neuronToLowerCoefficient[pair.first] = pair.second;
+                }
+                for ( const auto &pair : upperBoundTightenings[j]._neuronToCoefficient )
+                {
+                    neuronToUpperCoefficient[pair.first] = pair.second;
+                }
+            }
+        }
+        PolygonalTightening lowerTightening( neuronToLowerCoefficient, 0, PolygonalTightening::UB );
+        PolygonalTightening upperTightening( neuronToUpperCoefficient, 0, PolygonalTightening::LB );
+        tightenings.append( lowerTightening );
+        tightenings.append( upperTightening );
+    }
+    const Vector<PolygonalTightening> tighteningsVector =
+        Vector<PolygonalTightening>( tightenings );
+    return tighteningsVector;
+}

-    else if ( Options::get()->getMILPSolverBoundTighteningType() ==
-              MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP )
-    {
-        for ( const auto &pair : layers )
-        {
-            unsigned layerIndex = pair.first;
-            Layer *layer = pair.second;
-            const Vector<unsigned> nonFixedNeurons = getNonFixedNeurons( layer );
-            for ( const unsigned neuron : nonFixedNeurons )
-            {
-                NeuronIndex index( layerIndex, neuron );
-                Map<NeuronIndex, double> neuronToLowerCoefficient = Map<NeuronIndex, double>( {} );
-                Map<NeuronIndex, double> neuronToUpperCoefficient = Map<NeuronIndex, double>( {} );
-                neuronToLowerCoefficient[index] = 1;
-                neuronToUpperCoefficient[index] = 1;
-                PolygonalTightening lowerTightening(
-                    neuronToLowerCoefficient, layer->getUb( neuron ), PolygonalTightening::LB );
-                PolygonalTightening upperTightening(
-                    neuronToLowerCoefficient, layer->getLb( neuron ), PolygonalTightening::UB );
-                tightenings.append( lowerTightening );
-                tightenings.append( upperTightening );
-            }
-        }
-    }
+const Vector<PolygonalTightening> NetworkLevelReasoner::generatePolygonalTighteningsForInvprop()
+{
+    Vector<PolygonalTightening> tightenings = Vector<PolygonalTightening>( {} );
+    for ( const auto &pair : _layerIndexToLayer )
+    {
+        unsigned layerIndex = pair.first;
+        Layer *layer = pair.second;
+        const Vector<unsigned> &nonFixedNeurons = getNonFixedNeurons( layer );
+        for ( const unsigned neuron : nonFixedNeurons )
+        {
+            NeuronIndex index( layerIndex, neuron );
+            Map<NeuronIndex, double> neuronToLowerCoefficient = Map<NeuronIndex, double>( {} );
+            Map<NeuronIndex, double> neuronToUpperCoefficient = Map<NeuronIndex, double>( {} );
+            neuronToLowerCoefficient[index] = 1;
+            neuronToUpperCoefficient[index] = 1;
+            PolygonalTightening lowerTightening(
+                neuronToLowerCoefficient, layer->getUb( neuron ), PolygonalTightening::UB );
+            PolygonalTightening upperTightening(
+                neuronToUpperCoefficient, layer->getLb( neuron ), PolygonalTightening::LB );
+            tightenings.append( lowerTightening );
+            tightenings.append( upperTightening );
+        }
+    }
     const Vector<PolygonalTightening> tighteningsVector =
@@ -1603,204 +1643,220 @@ NetworkLevelReasoner::generatePolygonalTightenings( const Map<unsigned, Layer *>
     return tighteningsVector;
 }
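Both generator paths above hand back PolygonalTightening objects, each pairing a coefficient map with a scalar bound. A minimal illustrative sketch of that encoding, using made-up NeuronIndex values rather than anything from the patch: the constraint x_{2,0} - 0.5 * x_{1,0} <= 0 would be built as

    Map<NeuronIndex, double> coefficients;
    coefficients[NeuronIndex( 2, 0 )] = 1;    // the activation's output neuron (hypothetical)
    coefficients[NeuronIndex( 1, 0 )] = -0.5; // its weighted-sum source (hypothetical)
    PolygonalTightening example( coefficients, 0, PolygonalTightening::UB );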

-const List<NeuronIndex>
-NetworkLevelReasoner::selectConstraints( const Map<unsigned, Layer *> &layers )
+const Vector<NeuronIndex> NetworkLevelReasoner::selectConstraints()
 {
-    // ...
-    List<NeuronIndex> neuronList = List<NeuronIndex>( {} );
-    unsigned neuronCount = GlobalConfiguration::PMNR_SELECTED_NEURONS;
-
-    switch ( Options::get()->getMILPSolverBoundTighteningType() )
-    {
-    case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM:
-    {
-        Vector<unsigned> candidateLayers = getLayersWithNonFixedNeurons( layers );
-        std::mt19937_64 rng( GlobalConfiguration::PMNR_RANDOM_SEED );
-        std::uniform_int_distribution<unsigned> dis_layer( 0, candidateLayers.size() - 1 );
-        unsigned entry = dis_layer( rng );
-        unsigned index = candidateLayers[entry];
-
-        Layer *layer = _layerIndexToLayer[index];
-        Vector<unsigned> candidateNeurons = getNonFixedNeurons( layer );
-        std::uniform_int_distribution<unsigned> dis_neuron( 0, candidateNeurons.size() - 1 );
-        for ( unsigned i = 0; i < neuronCount; ++i )
-        {
-            unsigned entry = dis_neuron( rng );
-            unsigned neuron = candidateNeurons[entry];
-            neuronList.append( NeuronIndex( index, neuron ) );
-        }
-        break;
-    }
-
-    case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT:
-    {
-        initializeSymbolicBoundsMaps();
-
-        double maxScore = 0;
-        unsigned maxScoreIndex = 0;
-        Map<NeuronIndex, double> neuronIndexToScore;
-        for ( const auto &pair : _layerIndexToLayer )
-        {
-            double score = 0;
-            unsigned index = pair.first;
-            Layer *layer = pair.second;
-            unsigned layerSize = layer->getSize();
-
-            if ( getNonFixedNeurons( layer ).size() == 0 )
-            {
-                continue;
-            }
-
-            for ( unsigned i = 0; i < layerSize; ++i )
-            {
-                if ( isNeuronNonFixed( layer, i ) )
-                {
-                    double neuronScore = std::pow(
-                        ( _outputLayerSymbolicLb[index][i] + _outputLayerSymbolicUb[index][i] ) /
-                            2.0,
-                        2 );
-                    neuronIndexToScore.insert( NeuronIndex( index, i ), neuronScore );
-                    score += neuronScore;
-                }
-            }
-
-            if ( score > maxScore )
-            {
-                maxScore = score;
-                maxScoreIndex = index;
-            }
-        }
-
-        Layer *layer = _layerIndexToLayer[maxScoreIndex];
-        unsigned layerSize = layer->getSize();
-        std::priority_queue<std::pair<double, unsigned>,
-                            std::vector<std::pair<double, unsigned>>,
-                            std::less<std::pair<double, unsigned>>>
-            max_priority_queue;
-        for ( unsigned i = 0; i < layerSize; ++i )
-        {
-            if ( isNeuronNonFixed( layer, i ) )
-            {
-                double neuronScore = neuronIndexToScore[NeuronIndex( maxScoreIndex, i )];
-                max_priority_queue.push( std::pair<double, unsigned>( neuronScore, i ) );
-            }
-        }
-
-        for ( unsigned i = 0; i < neuronCount; ++i )
-        {
-            unsigned neuron = max_priority_queue.top().second;
-            neuronList.append( NeuronIndex( maxScoreIndex, neuron ) );
-            max_priority_queue.pop();
-        }
-        break;
-    }
-
-    case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS:
-    {
-        initializeSymbolicBoundsMaps();
-
-        /*double maxScore = 0;
-        unsigned maxScoreIndex = 0;
-        Map<NeuronIndex, double> neuronIndexToScore;
-        for ( const auto &pair : _layerIndexToLayer )
-        {
-            double score = 0;
-            unsigned index = pair.first;
-            Layer *layer = pair.second;
-            unsigned layerSize = layer->getSize();
-            unsigned inputLayerSize = _layerIndexToLayer[0]->getSize();
-            unsigned outputLayerSize = _layerIndexToLayer[getNumberOfLayers() - 1]->getSize();
-            Vector<double> outputLayerScores( outputLayerSize, 0 );
-
-            if ( getNonFixedNeurons( layer ).size() == 0 )
-            {
-                continue;
-            }
-
-            double *Lbs = layer->getLbs();
-            double *Ubs = layer->getUbs();
-            double *symbolicLb = layer->getSymbolicLb();
-            double *symbolicUb = layer->getSymbolicUb();
-            double *symbolicLowerBias = layer->getSymbolicLowerBias();
-            double *symbolicUpperBias = layer->getSymbolicUpperBias();
-            for ( unsigned i = 0; i < layerSize; ++i )
-            {
-
-                for ( unsigned j = 0; j < outputLayerSize; j++ )
-                {
-                    outputLayerScores[j] = _outputLayerSymbolicLowerBias[j];
-
-                    if ( _outputLayerSymbolicLb[index][i] > 0 )
-                    {
-                        outputLayerScores[j] += _outputLayerSymbolicLb[index][i] *
-                            symbolicLb[i];
-                    }
-                    else
-                    {
-                        outputLayerScores[j] += _outputLayerSymbolicLb[index][i] *
-                            symbolicUb[i];
-                    }
-
-                    for ( unsigned k = 0; k < inputLayerSize; k++ )
-                    {
-
-                    }
-                }
-
-                for ( unsigned j = 0; j < outputLayerSize; j++ )
-                {
-                    neuronScore += std::pow( outputLayerScores[j], 2 );
-                }
-
-                neuronIndexToScore.insert( NeuronIndex( index, i ), neuronScore );
-                score += neuronScore;
-            }
-        }
-
-        if ( score > maxScore )
-        {
-            maxScore = score;
-            maxScoreIndex = index;
-        }
-
-        Layer *layer = _layerIndexToLayer[maxScoreIndex];
-        unsigned layerSize = layer->getSize();
-        std::priority_queue<std::pair<double, unsigned>,
-                            std::vector<std::pair<double, unsigned>>,
-                            std::less<std::pair<double, unsigned>>>
-            max_priority_queue;
-        for ( unsigned i = 0; i < layerSize; ++i )
-        {
-            if ( isNeuronNonFixed( layer, i ) )
-            {
-                double neuronScore = neuronIndexToScore[NeuronIndex( maxScoreIndex, i )];
-                max_priority_queue.push( std::pair( neuronScore, i ) );
-            }
-        }
-
-        for ( unsigned i = 0; i < neuronCount; ++i )
-        {
-            unsigned neuron = max_priority_queue.top().second;
-            neuronList.append( NeuronIndex( maxScoreIndex, neuron ) );
-            max_priority_queue.pop();
-        }
-        break;*/
-    }
-
-    default:
-        break;
-    }
-
-    const List<NeuronIndex> list( neuronList );
-    return list;
-}
+    if ( Options::get()->getMILPSolverBoundTighteningType() ==
+         MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM )
+    {
+        return selectConstraintsForPMNRRandom();
+    }
+    else if ( Options::get()->getMILPSolverBoundTighteningType() ==
+              MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT )
+    {
+        return selectConstraintsForPMNRGradient();
+    }
+    else if ( Options::get()->getMILPSolverBoundTighteningType() ==
+              MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS )
+    {
+        return selectConstraintsForPMNRBBPS();
+    }
+
+    const Vector<NeuronIndex> vect( GlobalConfiguration::PMNR_SELECTED_NEURONS );
+    return vect;
+}
+
+const Vector<NeuronIndex> NetworkLevelReasoner::selectConstraintsForPMNRRandom()
+{
+    unsigned neuronCount = GlobalConfiguration::PMNR_SELECTED_NEURONS;
+    Vector<NeuronIndex> neuronVector = Vector<NeuronIndex>( neuronCount );
+
+    const Vector<unsigned> &candidateLayers = getLayersWithNonFixedNeurons();
+    std::mt19937_64 rng( GlobalConfiguration::PMNR_RANDOM_SEED );
+    std::uniform_int_distribution<unsigned> disLayer( 0, candidateLayers.size() - 1 );
+    unsigned entry = disLayer( rng );
+    unsigned index = candidateLayers[entry];
+
+    Layer *layer = _layerIndexToLayer[index];
+    const Vector<unsigned> &candidateNeurons = getNonFixedNeurons( layer );
+    std::vector<unsigned> candidateNeuronsVector = candidateNeurons.getContainer();
+    std::shuffle( candidateNeuronsVector.begin(), candidateNeuronsVector.end(), rng );
+    for ( unsigned i = 0; i < neuronCount; ++i )
+    {
+        unsigned neuron = candidateNeuronsVector[i];
+        NeuronIndex idx = NeuronIndex( index, neuron );
+        neuronVector[i] = idx;
+    }
+
+    const Vector<NeuronIndex> vect( neuronVector );
+    return vect;
+}
+
+const Vector<NeuronIndex> NetworkLevelReasoner::selectConstraintsForPMNRGradient()
+{
+    unsigned neuronCount = GlobalConfiguration::PMNR_SELECTED_NEURONS;
+    unsigned outputLayerSize = _layerIndexToLayer[getNumberOfLayers() - 1]->getSize();
+    Vector<NeuronIndex> neuronVector = Vector<NeuronIndex>( neuronCount );
+
+    double maxScore = 0;
+    unsigned maxScoreIndex = 0;
+    Map<NeuronIndex, double> neuronIndexToScore;
+    for ( const auto &pair : _layerIndexToLayer )
+    {
+        double score = 0;
+        unsigned index = pair.first;
+        Layer *layer = pair.second;
+        unsigned layerSize = layer->getSize();
+
+        if ( getNonFixedNeurons( layer ).size() == 0 )
+        {
+            continue;
+        }
+
+        for ( unsigned i = 0; i < layerSize; ++i )
+        {
+            if ( isNeuronNonFixed( layer, i ) )
+            {
+                double neuronScore = 0;
+                for ( unsigned j = 0; j < outputLayerSize; ++j )
+                {
+                    neuronScore +=
+                        std::pow( ( getOutputLayerSymbolicLb( index )[j * outputLayerSize + i] +
                                    getOutputLayerSymbolicUb( index )[j * outputLayerSize + i] ) /
+                                      2.0,
+                                  2 );
+                }
+                neuronIndexToScore.insert( NeuronIndex( index, i ), neuronScore );
+                score += neuronScore;
+            }
+        }
+
+        if ( score > maxScore )
+        {
+            maxScore = score;
+            maxScoreIndex = index;
+        }
+    }
+
+    Layer *layer = _layerIndexToLayer[maxScoreIndex];
+    unsigned layerSize = layer->getSize();
+    std::priority_queue<std::pair<double, unsigned>,
+                        std::vector<std::pair<double, unsigned>>,
+                        std::less<std::pair<double, unsigned>>>
+        maxQueue;
+    for ( unsigned i = 0; i < layerSize; ++i )
+    {
+        if ( isNeuronNonFixed( layer, i ) )
+        {
+            double neuronScore = neuronIndexToScore[NeuronIndex( maxScoreIndex, i )];
+            maxQueue.push( std::pair<double, unsigned>( neuronScore, i ) );
+        }
+    }
+
+    for ( unsigned i = 0; i < neuronCount; ++i )
+    {
+        unsigned neuron = maxQueue.top().second;
+        NeuronIndex idx = NeuronIndex( maxScoreIndex, neuron );
+        neuronVector[i] = idx;
+        maxQueue.pop();
+    }
+
+    const Vector<NeuronIndex> vect( neuronVector );
+    return vect;
+}
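The gradient heuristic above scores each non-fixed neuron by summing, over the output neurons, the squared midpoint of its lower and upper symbolic output coefficients, then keeps the highest-scoring layer and its top-scoring neurons. A self-contained numeric sketch of that score, with assumed coefficient values (not taken from any real network):

    // Assumed symbolic coefficients of one neuron in a two-output network.
    double lbCoefficients[] = { -1.0, 0.5 };
    double ubCoefficients[] = { 1.0, 1.5 };
    double neuronScore = 0;
    for ( unsigned j = 0; j < 2; ++j )
        neuronScore += std::pow( ( lbCoefficients[j] + ubCoefficients[j] ) / 2.0, 2 );
    // neuronScore = ( 0 / 2 )^2 + ( 2 / 2 )^2 = 1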
+const Vector<NeuronIndex> NetworkLevelReasoner::selectConstraintsForPMNRBBPS()
+{
+    unsigned neuronCount = GlobalConfiguration::PMNR_SELECTED_NEURONS;
+    Vector<NeuronIndex> neuronVector = Vector<NeuronIndex>( neuronCount );
+
+    double maxScore = 0;
+    unsigned maxScoreIndex = 0;
+    Map<NeuronIndex, double> neuronIndexToScore;
+    for ( const auto &pair : _layerIndexToLayer )
+    {
+        double score = 0;
+        unsigned index = pair.first;
+        Layer *layer = pair.second;
+        unsigned layerSize = layer->getSize();
+
+        if ( getNonFixedNeurons( layer ).size() == 0 )
+        {
+            continue;
+        }
+
+        for ( unsigned i = 0; i < layerSize; ++i )
+        {
+            if ( isNeuronNonFixed( layer, i ) )
+            {
+                double neuronScore = getBBPSScore( NeuronIndex( index, i ) );
+                neuronIndexToScore.insert( NeuronIndex( index, i ), neuronScore );
+                score += neuronScore;
+            }
+        }
+
+        if ( score > maxScore )
+        {
+            maxScore = score;
+            maxScoreIndex = index;
+        }
+    }
+
+    Layer *layer = _layerIndexToLayer[maxScoreIndex];
+    unsigned layerSize = layer->getSize();
+    std::priority_queue<std::pair<double, unsigned>,
+                        std::vector<std::pair<double, unsigned>>,
+                        std::less<std::pair<double, unsigned>>>
+        maxQueue;
+    for ( unsigned i = 0; i < layerSize; ++i )
+    {
+        if ( isNeuronNonFixed( layer, i ) )
+        {
+            double neuronScore = neuronIndexToScore[NeuronIndex( maxScoreIndex, i )];
+            maxQueue.push( std::pair<double, unsigned>( neuronScore, i ) );
+        }
+    }
+
+    for ( unsigned i = 0; i < neuronCount; ++i )
+    {
+        unsigned neuron = maxQueue.top().second;
+        NeuronIndex idx = NeuronIndex( maxScoreIndex, neuron );
+        neuronVector[i] = idx;
+        maxQueue.pop();
+    }
+
+    const Vector<NeuronIndex> vect( neuronVector );
+    return vect;
+}
+
+void NetworkLevelReasoner::initializeBBPSMaps()
+{
+    for ( const auto &pair : _layerIndexToLayer )
+    {
+        unsigned index = pair.first;
+        Layer *layer = pair.second;
+        unsigned layerSize = layer->getSize();
+
+        if ( getNonFixedNeurons( layer ).size() == 0 )
+        {
+            continue;
+        }
+
+        for ( unsigned i = 0; i < layerSize; ++i )
+        {
+            if ( isNeuronNonFixed( layer, i ) )
+            {
+                double score = 0;
+                _neuronToBBPSScores.insert( NeuronIndex( index, i ), score );
+            }
+        }
+    }
+}
 
-double NetworkLevelReasoner::getBranchingPoint( Layer *layer, unsigned neuron ) const
+Map<unsigned, double> NetworkLevelReasoner::getBranchingPoint( Layer *layer,
+                                                               unsigned neuron ) const
 {
     ASSERT( isNeuronNonFixed( layer, neuron ) );
 
+    Map<unsigned, double> point;
+
+    double lb = layer->getLb( neuron );
     double ub = layer->getUb( neuron );
 
@@ -1815,36 +1871,39 @@ double NetworkLevelReasoner::getBranchingPoint( Layer *layer, unsigned neuron )
         branchingPoint = ( lb + ub ) / 2.0;
     }
 
-    return branchingPoint;
+    // Assumed layout: key the chosen branching point by the neuron's index.
+    point[neuron] = branchingPoint;
+
+    return point;
 }
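getParametersForLayers, defined next, slices a single flat coefficient vector into per-layer parameter vectors, walking layers in index order and consuming getNumberOfParametersPerType entries for each. A hedged usage sketch from inside the class, assuming a hypothetical network whose only parameterised layers are two RELU layers with one parameter apiece (the layer layout and per-type counts here are illustrative, not from the patch):

    Vector<double> coeffs( { 0.25, 0.75 } );
    Map<unsigned, Vector<double>> sliced = getParametersForLayers( coeffs );
    // With layers { 0: INPUT, 1: WEIGHTED_SUM, 2: RELU, 3: WEIGHTED_SUM, 4: RELU },
    // sliced[2] == { 0.25 } and sliced[4] == { 0.75 }; the other layers get empty vectors.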
 
 const Map<unsigned, Vector<double>>
-NetworkLevelReasoner::getParametersForLayers( const Map<unsigned, Layer *> &layers,
-                                              const Vector<double> &coeffs ) const
+NetworkLevelReasoner::getParametersForLayers( const Vector<double> &coeffs ) const
 {
+    unsigned totalCoeffsCount = getNumberOfParameters();
+    ASSERT( coeffs.size() == totalCoeffsCount );
     unsigned index = 0;
     Map<unsigned, Vector<double>> layerIndicesToParameters;
-    for ( const auto &pair : layers )
+    for ( const auto &pair : _layerIndexToLayer )
     {
         unsigned layerIndex = pair.first;
         Layer *layer = pair.second;
-        unsigned n_coeffs = getNumberOfParametersPerType( layer->getLayerType() );
-        Vector<double> current_coeffs( n_coeffs );
-        for ( unsigned i = 0; i < n_coeffs; ++i )
+        unsigned coeffsCount = getNumberOfParametersPerType( layer->getLayerType() );
+        Vector<double> currentCoeffs( coeffsCount );
+        for ( unsigned i = 0; i < coeffsCount; ++i )
         {
-            current_coeffs[i] = coeffs[index + i];
+            currentCoeffs[i] = coeffs[index + i];
         }
-        layerIndicesToParameters.insert( layerIndex, current_coeffs );
-        index += n_coeffs;
+        layerIndicesToParameters.insert( layerIndex, currentCoeffs );
+        index += coeffsCount;
     }
     const Map<unsigned, Vector<double>> parametersForLayers( layerIndicesToParameters );
     return parametersForLayers;
 }
 
-unsigned NetworkLevelReasoner::getNumberOfParameters( const Map<unsigned, Layer *> &layers ) const
+unsigned NetworkLevelReasoner::getNumberOfParameters() const
 {
     unsigned num = 0;
-    for ( const auto &pair : layers )
+    for ( const auto &pair : _layerIndexToLayer )
     {
         Layer *layer = pair.second;
         num += getNumberOfParametersPerType( layer->getLayerType() );
@@ -1863,15 +1922,14 @@ unsigned NetworkLevelReasoner::getNumberOfParametersPerType( Layer::Type t ) con
     return 0;
 }
 
-const Vector<unsigned>
-NetworkLevelReasoner::getLayersWithNonFixedNeurons( const Map<unsigned, Layer *> &layers ) const
+const Vector<unsigned> NetworkLevelReasoner::getLayersWithNonFixedNeurons() const
 {
     Vector<unsigned> layerWithNonFixedNeurons = Vector<unsigned>( {} );
-    for ( const auto &pair : layers )
+    for ( const auto &pair : _layerIndexToLayer )
     {
         unsigned layerIndex = pair.first;
         Layer *layer = pair.second;
-        const Vector<unsigned> nonFixedNeurons = getNonFixedNeurons( layer );
+        const Vector<unsigned> &nonFixedNeurons = getNonFixedNeurons( layer );
         if ( nonFixedNeurons.size() > 0 )
         {
             layerWithNonFixedNeurons.append( layerIndex );
@@ -2060,21 +2118,18 @@ bool NetworkLevelReasoner::isNeuronNonFixed( Layer *layer, unsigned neuron ) con
     return nonFixed;
 }
 
-unsigned NetworkLevelReasoner::countNonlinearLayers( const Map<unsigned, Layer *> &layers ) const
+void NetworkLevelReasoner::initializeSymbolicBoundsMaps( const Vector<double> &coeffs )
 {
-    unsigned num = 0;
-    for ( auto pair : layers )
-    {
-        unsigned layerIndex = pair.first;
-        Layer *layer = layers[layerIndex];
-        num += layer->getLayerType() != Layer::WEIGHTED_SUM ? 1 : 0;
-    }
-    return num;
-}
+    // Clear the previous symbolic bound maps.
+    _outputLayerSymbolicLb.clear();
+    _outputLayerSymbolicUb.clear();
+    _outputLayerSymbolicLowerBias.clear();
+    _outputLayerSymbolicUpperBias.clear();
 
-void NetworkLevelReasoner::initializeSymbolicBoundsMaps()
-{
-    freeMemoryForSymbolicBoundMapsIfNeeded();
+    _symbolicLbInTermsOfPredecessor.clear();
+    _symbolicUbInTermsOfPredecessor.clear();
+    _symbolicLowerBiasInTermsOfPredecessor.clear();
+    _symbolicUpperBiasInTermsOfPredecessor.clear();
 
     // Temporarily add weighted sum layer to the NLR of the same size of the output layer.
     Layer *outputLayer = _layerIndexToLayer[getNumberOfLayers() - 1];
@@ -2093,159 +2148,64 @@ void NetworkLevelReasoner::initializeSymbolicBoundsMaps()
         newLayer->setUb( i, FloatUtils::negativeInfinity() );
     }
 
-    // Populate symbolic bounds maps via DeepPoly.
- allocateMemoryForSymbolicBoundMapsIfNeeded(); - - if ( _deepPolyAnalysis == nullptr ) - _deepPolyAnalysis = std::unique_ptr( - new DeepPolyAnalysis( this, - true, - true, - &_outputLayerSymbolicLb, - &_outputLayerSymbolicUb, - &_outputLayerSymbolicLowerBias, - &_outputLayerSymbolicUpperBias, - &_symbolicLbInTermsOfPredecessor, - &_symbolicUbInTermsOfPredecessor, - &_symbolicLowerBiasInTermsOfPredecessor, - &_symbolicUpperBiasInTermsOfPredecessor ) ); - _deepPolyAnalysis->run(); - - // Remove new weighted sum layer. - removeLayerDependency( outputLayerIndex, newLayerIndex ); - _layerIndexToLayer.erase( newLayerIndex ); - if ( newLayer ) - { - delete newLayer; - newLayer = NULL; - } -} - -void NetworkLevelReasoner::allocateMemoryForSymbolicBoundMapsIfNeeded() -{ - unsigned outputLayerSize = _layerIndexToLayer[getNumberOfLayers() - 1]->getSize(); + // Initialize maps with zero vectors. unsigned maxLayerSize = getMaxLayerSize(); for ( const auto &pair : _layerIndexToLayer ) { unsigned layerIndex = pair.first; Layer *layer = pair.second; unsigned layerSize = layer->getSize(); + Layer::Type layerType = layer->getLayerType(); - double *currentOutputSymbolicLb = new double[layerSize * outputLayerSize]; - double *currentOutputSymbolicUb = new double[layerSize * outputLayerSize]; - double *currentOutputSymbolicLowerBias = new double[outputLayerSize]; - double *currentOutputSymbolicUpperBias = new double[outputLayerSize]; - - double *currentPredecessorSymbolicLb = new double[maxLayerSize * layerSize]; - double *currentPredecessorSymbolicUb = new double[maxLayerSize * layerSize]; - double *currentPredecessorSymbolicLowerBias = new double[layerSize]; - double *currentPredecessorSymbolicUpperBias = new double[layerSize]; - - std::fill_n( currentOutputSymbolicLb, layerSize * outputLayerSize, 0 ); - std::fill_n( currentOutputSymbolicUb, layerSize * outputLayerSize, 0 ); - std::fill_n( currentOutputSymbolicLowerBias, outputLayerSize, 0 ); - std::fill_n( currentOutputSymbolicUpperBias, outputLayerSize, 0 ); - - std::fill_n( currentPredecessorSymbolicLb, maxLayerSize * layerSize, 0 ); - std::fill_n( currentPredecessorSymbolicUb, maxLayerSize * layerSize, 0 ); - std::fill_n( currentPredecessorSymbolicLowerBias, layerSize, 0 ); - std::fill_n( currentPredecessorSymbolicUpperBias, layerSize, 0 ); - - _outputLayerSymbolicLb.insert( layerIndex, currentOutputSymbolicLb ); - _outputLayerSymbolicUb.insert( layerIndex, currentOutputSymbolicUb ); - _outputLayerSymbolicLowerBias.insert( layerIndex, currentOutputSymbolicLowerBias ); - _outputLayerSymbolicUpperBias.insert( layerIndex, currentOutputSymbolicUpperBias ); - - _symbolicLbInTermsOfPredecessor.insert( layerIndex, currentPredecessorSymbolicLb ); - _symbolicUbInTermsOfPredecessor.insert( layerIndex, currentPredecessorSymbolicUb ); - _symbolicLowerBiasInTermsOfPredecessor.insert( layerIndex, - currentPredecessorSymbolicLowerBias ); - _symbolicUpperBiasInTermsOfPredecessor.insert( layerIndex, - currentPredecessorSymbolicUpperBias ); - } -} - -void NetworkLevelReasoner::freeMemoryForSymbolicBoundMapsIfNeeded() -{ - for ( auto &pair : _outputLayerSymbolicLb ) - { - if ( pair.second ) - { - delete[] pair.second; - pair.second = NULL; - } - } - _outputLayerSymbolicLb.clear(); - - for ( auto &pair : _outputLayerSymbolicUb ) - { - if ( pair.second ) - { - delete[] pair.second; - pair.second = NULL; - } - } - _outputLayerSymbolicUb.clear(); - - for ( auto &pair : _outputLayerSymbolicLowerBias ) - { - if ( pair.second ) - { - delete[] pair.second; - pair.second = NULL; - 
} - } - _outputLayerSymbolicLowerBias.clear(); - - for ( auto &pair : _outputLayerSymbolicUpperBias ) - { - if ( pair.second ) - { - delete[] pair.second; - pair.second = NULL; - } - } - _outputLayerSymbolicUpperBias.clear(); - - for ( auto &pair : _symbolicLbInTermsOfPredecessor ) - { - if ( pair.second ) - { - delete[] pair.second; - pair.second = NULL; - } - } - _symbolicLbInTermsOfPredecessor.clear(); + _outputLayerSymbolicLb[layerIndex] = Vector( outputLayerSize * layerSize, 0 ); + _outputLayerSymbolicUb[layerIndex] = Vector( outputLayerSize * layerSize, 0 ); + _outputLayerSymbolicLowerBias[layerIndex] = Vector( outputLayerSize, 0 ); + _outputLayerSymbolicUpperBias[layerIndex] = Vector( outputLayerSize, 0 ); - for ( auto &pair : _symbolicUbInTermsOfPredecessor ) - { - if ( pair.second ) + if ( layerType != Layer::WEIGHTED_SUM && layerType != Layer::INPUT ) { - delete[] pair.second; - pair.second = NULL; + _symbolicLbInTermsOfPredecessor[layerIndex] = + Vector( layerSize * maxLayerSize, 0 ); + _symbolicUbInTermsOfPredecessor[layerIndex] = + Vector( layerSize * maxLayerSize, 0 ); + _symbolicLowerBiasInTermsOfPredecessor[layerIndex] = Vector( layerSize, 0 ); + _symbolicUpperBiasInTermsOfPredecessor[layerIndex] = Vector( layerSize, 0 ); } } - _symbolicUbInTermsOfPredecessor.clear(); - for ( auto &pair : _symbolicLowerBiasInTermsOfPredecessor ) - { - if ( pair.second ) - { - delete[] pair.second; - pair.second = NULL; - } - } - _symbolicLowerBiasInTermsOfPredecessor.clear(); + // Populate symbolic bounds maps via DeepPoly. + bool useParameterisedSBT = coeffs.size() > 0; + Map> layerIndicesToParameters = Map>( {} ); + if ( useParameterisedSBT ) + { + layerIndicesToParameters = getParametersForLayers( coeffs ); + } + + _deepPolyAnalysis = std::unique_ptr( + new DeepPolyAnalysis( this, + true, + true, + useParameterisedSBT, + &layerIndicesToParameters, + &_outputLayerSymbolicLb, + &_outputLayerSymbolicUb, + &_outputLayerSymbolicLowerBias, + &_outputLayerSymbolicUpperBias, + &_symbolicLbInTermsOfPredecessor, + &_symbolicUbInTermsOfPredecessor, + &_symbolicLowerBiasInTermsOfPredecessor, + &_symbolicUpperBiasInTermsOfPredecessor ) ); + _deepPolyAnalysis->run(); + _deepPolyAnalysis = nullptr; - for ( auto &pair : _symbolicUpperBiasInTermsOfPredecessor ) + // Remove new weighted sum layer. + removeLayerDependency( outputLayerIndex, newLayerIndex ); + _layerIndexToLayer.erase( newLayerIndex ); + if ( newLayer ) { - if ( pair.second ) - { - delete[] pair.second; - pair.second = NULL; - } + delete newLayer; + newLayer = NULL; } - _symbolicUpperBiasInTermsOfPredecessor.clear(); } void NetworkLevelReasoner::mergeWSLayers( unsigned secondLayerIndex, diff --git a/src/nlr/NetworkLevelReasoner.h b/src/nlr/NetworkLevelReasoner.h index 6b3b78afb1..2b4c0dbb1a 100644 --- a/src/nlr/NetworkLevelReasoner.h +++ b/src/nlr/NetworkLevelReasoner.h @@ -147,7 +147,7 @@ class NetworkLevelReasoner : public LayerOwner void MILPTighteningForOneLayer( unsigned targetIndex ); void iterativePropagation(); - void initializeSymbolicBoundsMaps(); + void initializeSymbolicBoundsMaps( const Vector &coeffs = Vector( {} ) ); // Return optimizable parameters which minimize parameterised SBT bounds' volume. const Vector OptimalParameterisedSymbolicBoundTightening(); @@ -155,12 +155,15 @@ class NetworkLevelReasoner : public LayerOwner // Optimize biases of generated parameterised polygonal tightenings. 
const Vector OptimizeParameterisedPolygonalTightening(); + // Get total number of optimizable parameters for parameterised SBT relaxation. + unsigned getNumberOfParameters() const; + void receiveTighterBound( Tightening tightening ); void getConstraintTightenings( List &tightenings ); void clearConstraintTightenings(); - void receivePolygonalTighterBound( PolygonalTightening polygonal_tightening ); - void getConstraintPolygonalTightenings( List &polygonal_tightenings ); + void receivePolygonalTighterBound( PolygonalTightening polygonalTightening ); + void getConstraintPolygonalTightenings( List &polygonalTightening ); void clearConstraintPolygonalTightenings(); /* @@ -207,15 +210,18 @@ class NetworkLevelReasoner : public LayerOwner */ double getPreviousBias( const ReluConstraint *reluConstraint ) const; - const double *getOutputLayerSymbolicLb( unsigned layerIndex ) const; - const double *getOutputLayerSymbolicUb( unsigned layerIndex ) const; - const double *getOutputLayerSymbolicLowerBias( unsigned layerIndex ) const; - const double *getOutputLayerSymbolicUpperBias( unsigned layerIndex ) const; + Vector getOutputLayerSymbolicLb( unsigned layerIndex ) const; + Vector getOutputLayerSymbolicUb( unsigned layerIndex ) const; + Vector getOutputLayerSymbolicLowerBias( unsigned layerIndex ) const; + Vector getOutputLayerSymbolicUpperBias( unsigned layerIndex ) const; + + Vector getSymbolicLbInTermsOfPredecessor( unsigned layerIndex ) const; + Vector getSymbolicUbInTermsOfPredecessor( unsigned layerIndex ) const; + Vector getSymbolicLowerBiasInTermsOfPredecessor( unsigned layerIndex ) const; + Vector getSymbolicUpperBiasInTermsOfPredecessor( unsigned layerIndex ) const; - const double *getSymbolicLbInTermsOfPredecessor( unsigned layerIndex ) const; - const double *getSymbolicUbInTermsOfPredecessor( unsigned layerIndex ) const; - const double *getSymbolicLowerBiasInTermsOfPredecessor( unsigned layerIndex ) const; - const double *getSymbolicUpperBiasInTermsOfPredecessor( unsigned layerIndex ) const; + Map getBBPSBranchingPoint( NeuronIndex index ) const; + double getBBPSScore( NeuronIndex index ) const; /* Finds logically consecutive WS layers and merges them, in order @@ -251,10 +257,6 @@ class NetworkLevelReasoner : public LayerOwner void freeMemoryIfNeeded(); - // Manage memory for symbolic bounds maps. - void allocateMemoryForSymbolicBoundMapsIfNeeded(); - void freeMemoryForSymbolicBoundMapsIfNeeded(); - List _constraintsInTopologicalOrder; // Map each neuron to a linear expression representing its weighted sum @@ -285,33 +287,34 @@ class NetworkLevelReasoner : public LayerOwner unsigned outputDimension ); void reduceLayerIndex( unsigned layer, unsigned startIndex ); - void optimizeBoundsWithPreimageApproximation( Map &layers, - LPFormulator &lpFormulator ); + void optimizeBoundsWithPreimageApproximation( LPFormulator &lpFormulator ); // Estimate Volume of parameterised symbolic bound tightening. - double EstimateVolume( const Map &layers, const Vector &coeffs ); + double EstimateVolume( const Vector &coeffs ); - void optimizeBoundsWithPMNR( Map &layers, LPFormulator &lpFormulator ); + void optimizeBoundsWithPMNR( LPFormulator &lpFormulator ); // Optimize biases of generated parameterised polygonal tightenings. 
double - OptimizeSingleParameterisedPolygonalTightening( const Map &layers, - PolygonalTightening &tightening, + OptimizeSingleParameterisedPolygonalTightening( PolygonalTightening &tightening, Vector &prevTightenings ); // Get current lower bound for selected parameterised polygonal tightenings' biases. double - getParameterisdPolygonalTighteningLowerBound( const Map &layers, - const Vector &coeffs, + getParameterisdPolygonalTighteningLowerBound( const Vector &coeffs, const Vector &gamma, PolygonalTightening &tightening, Vector &prevTightenings ); - const Vector - generatePolygonalTightenings( const Map &layers ); + const Vector generatePolygonalTightenings(); + const Vector generatePolygonalTighteningsForPMNR(); + const Vector generatePolygonalTighteningsForInvprop(); // Heuristically select neurons and polygonal tightenings for PMNR. - const List selectConstraints( const Map &layers ); + const Vector selectConstraints(); + const Vector selectConstraintsForPMNRRandom(); + const Vector selectConstraintsForPMNRGradient(); + const Vector selectConstraintsForPMNRBBPS(); // Return difference between given point and upper and lower bounds determined by parameterised // SBT relaxation. @@ -319,31 +322,21 @@ class NetworkLevelReasoner : public LayerOwner Map &point, unsigned i ) const; - double getBranchingPoint( Layer *layer, unsigned neuron ) const; - // Get map containing vector of optimizable parameters for parameterised SBT relaxation for // every layer index. const Map> - getParametersForLayers( const Map &layers, - const Vector &coeffs ) const; - - // Get total number of optimizable parameters for parameterised SBT relaxation. - unsigned getNumberOfParameters( const Map &layers ) const; + getParametersForLayers( const Vector &coeffs ) const; // Get number of optimizable parameters for parameterised SBT relaxation per layer type. unsigned getNumberOfParametersPerType( Layer::Type t ) const; // Get all indices of active non-weighted sum layers for INVPROP. - const Vector - getLayersWithNonFixedNeurons( const Map &layers ) const; + const Vector getLayersWithNonFixedNeurons() const; const Vector getNonFixedNeurons( Layer *layer ) const; bool isNeuronNonFixed( Layer *layer, unsigned neuron ) const; - // Get total number of non-weighted sum layers for INVPROP. 
- unsigned countNonlinearLayers( const Map &layers ) const; - /* Store previous biases for each ReLU neuron in a map for getPreviousBias() and BaBSR heuristic @@ -351,6 +344,10 @@ class NetworkLevelReasoner : public LayerOwner Map _previousBiases; void initializePreviousBiasMap(); + void initializeBBPSMaps(); + + Map getBranchingPoint( Layer *layer, unsigned neuron ) const; + /* If the NLR is manipulated manually in order to generate a new input query, this method can be used to assign variable indices @@ -358,15 +355,18 @@ class NetworkLevelReasoner : public LayerOwner */ void reindexNeurons(); - Map _outputLayerSymbolicLb; - Map _outputLayerSymbolicUb; - Map _outputLayerSymbolicLowerBias; - Map _outputLayerSymbolicUpperBias; + Map> _neuronToBBPSBranchingPoints; + Map _neuronToBBPSScores; + + Map> _outputLayerSymbolicLb; + Map> _outputLayerSymbolicUb; + Map> _outputLayerSymbolicLowerBias; + Map> _outputLayerSymbolicUpperBias; - Map _symbolicLbInTermsOfPredecessor; - Map _symbolicUbInTermsOfPredecessor; - Map _symbolicLowerBiasInTermsOfPredecessor; - Map _symbolicUpperBiasInTermsOfPredecessor; + Map> _symbolicLbInTermsOfPredecessor; + Map> _symbolicUbInTermsOfPredecessor; + Map> _symbolicLowerBiasInTermsOfPredecessor; + Map> _symbolicUpperBiasInTermsOfPredecessor; }; } // namespace NLR diff --git a/src/nlr/tests/Test_DeepPolyAnalysis.h b/src/nlr/tests/Test_DeepPolyAnalysis.h index 8e852e30e0..77112d48e2 100644 --- a/src/nlr/tests/Test_DeepPolyAnalysis.h +++ b/src/nlr/tests/Test_DeepPolyAnalysis.h @@ -208,64 +208,6 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); for ( const auto &bound : expectedBounds ) TS_ASSERT( existsBound( bounds, bound ) ); - - - // Reset bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - // Invoke initializeSymbolicBoundsMaps - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); - - List expectedBounds2( - { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), - - Tightening( 10, 1, Tightening::LB ), Tightening( 10, 5.5, 
Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 2, Tightening::UB ) - - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT_EQUALS( expectedBounds2.size(), bounds.size() ); - for ( const auto &bound : expectedBounds2 ) - TS_ASSERT( existsBound( bounds, bound ) ); } void populateResidualNetwork1( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h index 635949d23c..f523a9e993 100644 --- a/src/nlr/tests/Test_NetworkLevelReasoner.h +++ b/src/nlr/tests/Test_NetworkLevelReasoner.h @@ -1640,7 +1640,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x6 x7 x8 = softmax(x3, x4, x5) x9 = x6 + x7 + x8 - x10 = x6 + x7 + x8 + x10 = - x6 - x7 - x8 */ @@ -8195,7 +8195,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Layer 2 (with residual from x0): - x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1] + x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 2] x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5] x3 range: [-1, 2.5] @@ -9718,12 +9718,12 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Lower bound: alpha_l = x3.lb = -1 beta = x2.lb = -1 - gamma_l = -x2.lb * x3.lb = --1 * -1 = -1 + gamma_l = -x2.lb x3.lb = --1 * -1 = -1 Upper bound: alpha_u = x3.ub = 3 beta = x2.lb = -1 - gamma_u = -x2.lb * x3.ub = --1 * 3 = 3 + gamma_u = -x2.lb x3.ub = --1 * 3 = 3 x4.lb = -1 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + -1 = -2x0 + x1 - 1 : [-7, -2] x4.ub = 3 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + 3 = 2x0 - 7x1 + 3 : [0, 21] @@ -9731,9 +9731,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Layer 3: - x7.lb = -1 ( 2x0 - 5x1 + 3 ) = -2x0 + 7x1 - 3 : [-21, 0] - x7.ub = -1 ( -2x0 + 3x1 - 1 ) = 2x0 + x1 + 1 : [2, 7] - x4 range: [-21, 5] + x5.lb = -1 ( 2x0 - 5x1 + 3 ) = -2x0 + 7x1 - 3 : [-21, 0] + x5.ub = -1 ( -2x0 + x1 - 1 ) = 2x0 - x1 + 1 : [2, 7] + x5 range: [-21, 7] */ List expectedBounds( { Tightening( 2, -1, Tightening::LB ), @@ -9750,6 +9750,1915 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } + void test_parameterised_sbt_relus_all_active() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 : [11, 27] + x2.ub = 2x0 + 3x1 : [11, 27] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + Both ReLUs active, bound survive through activations: + + x4.lb = 2x0 + 3x1 : [11, 27] + x4.ub = 2x0 + 3x1 : [11, 27] + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = x0 + 2x1 : [6, 16] + x6.ub = x0 + 2x1 : [6, 16] + */ + + List expectedBounds( { + Tightening( 2, 11, Tightening::LB ), + Tightening( 2, 27, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 11, Tightening::LB 
), + Tightening( 4, 27, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, 6, Tightening::LB ), + Tightening( 6, 16, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_sbt_relus_active_and_inactive() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 30 : [-19, -3] + x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is inactive, bounds get zeroed + Second ReLU is active, bounds surive the activation + + x4.lb = 0 + x4.ub = 0 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 : [-11, -5] + */ + + List expectedBounds( { + Tightening( 2, -19, Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_sbt_relus_active_and_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is undecided, bound is concretized. LbOfUb = 12, using standard ReLU lower + coefficient. 
Coefficient: 12/(12--4) = 12/16 = 0.75
+            Second ReLU is active, bounds survive the activation
+
+            x4 range: [0, 12]
+            x4.lb = 0.75( 2x0 + 3x1 ) - 0.75 * 15 = 1.5x0 + 2.25x1 - 11.25
+            x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25
+
+            x5.lb = x0 + x1 : [5, 11]
+            x5.ub = x0 + x1 : [5, 11]
+
+            Layer 2:
+
+            x6.lb = 0.5x0 + 1.25x1 - 11.25
+            x6.ub = 0.5x0 + 1.25x1 - 8.25
+
+            x6 range: [2 + 1.25 - 11.25 = -8, 3 + 6.25 - 8.25 = 1] = [-8, 1]
+        */
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, -4, Tightening::LB ),
+            Tightening( 2, 12, Tightening::UB ),
+            Tightening( 3, 5, Tightening::LB ),
+            Tightening( 3, 11, Tightening::UB ),
+
+            Tightening( 4, 0, Tightening::LB ),
+            Tightening( 4, 12, Tightening::UB ),
+            Tightening( 5, 5, Tightening::LB ),
+            Tightening( 5, 11, Tightening::UB ),
+
+            Tightening( 6, -8, Tightening::LB ),
+            Tightening( 6, 1, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+    }
+
+    void test_parameterised_sbt_relus_active_and_externally_fixed()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTRelu( nlr, tableau );
+
+        tableau.setLowerBound( 0, 4 );
+        tableau.setUpperBound( 0, 6 );
+        tableau.setLowerBound( 1, 1 );
+        tableau.setUpperBound( 1, 5 );
+
+        // Strong negative bias for x2, which is node (1,0). Should make the node unfixed.
+        nlr.setBias( 1, 0, -15 );
+
+        // However, one of the ReLU's variables has been eliminated
+        nlr.eliminateVariable( 2, -3 );
+
+        unsigned paramCount = nlr.getNumberOfParameters();
+        Vector<double> coeffs( paramCount, 0.5 );
+
+        // Invoke parameterised SBT
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) );
+
+        /*
+          Input ranges:
+
+          x0: [4, 6]
+          x1: [1, 5]
+
+          Layer 1:
+
+          x2.lb = 2x0 + 3x1 - 15 : [-4, 12]
+          x2.ub = 2x0 + 3x1 - 15 : [-4, 12]
+
+          x3.lb = x0 + x1 : [5, 11]
+          x3.ub = x0 + x1 : [5, 11]
+
+          First ReLU is inactive (set externally), bounds get zeroed
+          Second ReLU is active, bounds survive the activation
+
+          x4.lb = 0
+          x4.ub = 0
+
+          x5.lb = x0 + x1 : [5, 11]
+          x5.ub = x0 + x1 : [5, 11]
+
+          Layer 2:
+
+          x6.lb = - x0 - x1 : [-11, -5]
+          x6.ub = - x0 - x1 : [-11, -5]
+        */
+
+        List<Tightening> expectedBounds( {
+            // x2 does not appear, because it has been eliminated
+
+            Tightening( 3, 5, Tightening::LB ),
+            Tightening( 3, 11, Tightening::UB ),
+
+            Tightening( 4, 0, Tightening::LB ),
+            Tightening( 4, 0, Tightening::UB ),
+            Tightening( 5, 5, Tightening::LB ),
+            Tightening( 5, 11, Tightening::UB ),
+
+            Tightening( 6, -11, Tightening::LB ),
+            Tightening( 6, -5, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+    }
+
+    void test_parameterised_sbt_relu_residual1()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTReluResidual1( nlr, tableau );
+
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 1 );
+
+        unsigned paramCount = nlr.getNumberOfParameters();
+        Vector<double> coeffs( paramCount, 0.5 );
+
+        // Invoke parameterised SBT
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) );
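+
+        // Sanity sketch (illustrative only, not part of the original suite): with
+        // alpha = 0.5, the parameterised relaxation used in the derivation below
+        // brackets relu(x) between 0.5 * x and 0.5 * x + 0.5 on [-1, 1].
+        for ( double x = -1.0; x <= 1.0; x += 0.25 )
+        {
+            double reluValue = FloatUtils::max( x, 0.0 );
+            TS_ASSERT( FloatUtils::lte( 0.5 * x, reluValue ) );
+            TS_ASSERT( FloatUtils::lte( reluValue, 0.5 * x + 0.5 ) );
+        }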
+ + /* + Input ranges: + + x0: [-1, 1] + + Layer 1: + + x1.lb = x0 : [-1, 1] + x1.ub = x0 : [-1, 1] + + ReLU is undecided, bound is concretized. LbOfUb = 1, using standard ReLU lower + coefficient. Coefficient: 1/( 1--1 ) = 1/2 = 0.5 + + x2.lb = 0.5x0 + x2.ub = 0.5x0 + 0.5 + x2 range: [0, 1] + + Layer 2 (with residual from x0): + + x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 2] + x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5] + x3 range: [-1, 2.5] + + ReLU is undecided, bound is concretized. LbOfUb = 2, using standard ReLU lower + coefficient. Coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7. + + x4.lb = 0 + x4.ub = 5/7 ( -1.5x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5] + x4 range: [0, 2.5] + + Layer 3 (with residual from x1): + + x5.lb = 3 ( 0 ) + 3 ( x0 ) + 1 = 3x0 + 1 : [-2, 4] + x5.ub = 3 ( -15/14 x0 + 20/14 ) + 3 ( x0 ) + 1 = -3/14 x0 + 74/14 : [71/14, 77/14 = 5.5] + + x5 range: [-2, 4] + */ + + List expectedBounds( { + Tightening( 1, -1, Tightening::LB ), + Tightening( 1, 1, Tightening::UB ), + Tightening( 2, 0, Tightening::LB ), + Tightening( 2, 1, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 2.5, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 2.5, Tightening::UB ), + Tightening( 5, 2, Tightening::LB ), + Tightening( 5, 5.5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_sbt_relu_residual2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTReluResidual2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [-1, 1] + + Layer 1: + + x1.lb = x0 : [-1, 1] + x1.ub = x0 : [-1, 1] + + ReLU is undecided, bound is concretized. LbOfUb = 1, using standard ReLU lower + coefficient. Coefficient: 1/( 1--1 ) = 1/2 = 0.5 + + x2.lb = 0.5x0 + x2.ub = 0.5x0 + 0.5 + x2 range: [0, 1] + + Layer 2 (with residual from x0): + + x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 2] + x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5] + x3 range: [-1, 2.5] + + ReLU is undecided, bound is concretized. LbOfUb = 2, using standard ReLU lower + coefficient. Coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7. 
+ + x4.lb = 0 + x4.ub = 5/7 ( -1.5x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5] + x4 range: [0, 2.5] + + Layer 3 (with residual from x0): + + x5.lb = 3 ( 0 ) + 1 ( x0 ) + 1 = 1x0 + 1 : [0, 2] + x5.ub = 3 ( -15/14 x0 + 20/14 ) + 1 ( x0 ) + 1 = -31/14 x0 + 74/14 : [43/14, 105/14 + = 7.5] x5 range: [0, 7.5] + + Layer 4: + x6.lb = 1x0 + 1 : [0, 2] + x6.ub = -31/14 x0 + 74/14 : [43/14, 105/14 = 7.5] + x6 range: [0, 7.5] + */ + + List expectedBounds( { + Tightening( 1, -1, Tightening::LB ), + Tightening( 1, 1, Tightening::UB ), + Tightening( 2, 0, Tightening::LB ), + Tightening( 2, 1, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 2.5, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 2.5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 7.5, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), + Tightening( 6, 7.5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_sbt_relu_reindex() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTReluReindex( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [-1, 1] + x1: [-1, 1] + + Layer 1: + + x2.lb = x0 + x1 : [-2, 2] + x2.ub = x0 + x1 : [-2, 2] + + x3.lb = x0 - x1 : [-2, 2] + x3.ub = x0 - x1 : [-2, 2] + + Both ReLUs are undecided, bounds are concretized. LbOfUb = 2, using standard ReLU lower + coefficient. Coefficient: 2/( 2--2 ) = 2/4 = 0.5 + + x4.lb = 0.5 ( x0 + x1 ) = 0.5x0 + 0.5x1 + x4.ub = 0.5 ( x0 + x1 ) + 1 = 0.5x0 + 0.5x1 + 1 + x4 range: [0, 2] + + x5.lb = 0.5 ( x0 - x1 ) = 0.5x0 - 0.5x1 + x5.ub = 0.5 ( x0 - x1 ) + 1 = 0.5x0 - 0.5x1 + 1 + x5 range: [0, 2] + + Layer 2: + + x6.lb = 1 ( 0.5x0 + 0.5x1 ) + 1 ( 0.5x0 - 0.5x1 ) = x0 : [-1, 1] + x6.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) + 1 ( 0.5x0 - 0.5x1 + 1 ) = x0 + 2 : [1, 3] + x6 range: [-1, 3] + + x7.lb = 1 ( 0.5x0 + 0.5x1 ) - 1 ( 0.5x0 - 0.5x1 + 1 ) = x1 - 1 : [-2, 0] + x7.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) - 1 ( 0.5x0 - 0.5x1 ) = x1 + 1 : [0, 2] + x7 range: [-2, 2] + + Both ReLUs are undecided, bounds are concretized. + First ReLU: LbOfUb = 12, using standard ReLU coefficient. + Coefficient (first ReLU, lower): 1/( 1--1 ) = 1/2 = 0.5 + Coefficient (first ReLU, upper): 1 (propagated as is) + Second ReLU: LbOfUb = 0, using custom lower coefficient 0.5. 
+ Coefficient (second ReLU, lower): 0.5 + Coefficient (second ReLU, upper): 2/( 2--2 ) = 2/4 = 0.5 + + x8.lb = 0.5 ( x0 ) = 0.5x0 + x8.ub = x0 + 2 + x8 range: [0, 3] + + x9.lb = 0.5 ( x1 - 1 ) = 0.5x1 - 0.5 + x9.ub = 0.5 ( x1 + 1 ) + 1 = 0.5x1 + 1.5 + x9 range: [0, 2] + + Layer 3: + + x10.lb = 1 ( 0.5x0 ) + 1 ( 0.5x1 - 0.5 ) + 1 = 0.5x0 + 0.5x1 + 0.5 : [-0.5, 1.5] + x10.ub = 1 ( x0 + 2 ) + 1 ( 0.5x1 + 1.5 ) + 1 = x0 + 0.5x1 + 4.5 : [3, 6] + x10 range: [-0.5, 6] + + x11.lb = 0.5x1 - 0.5 + x11.ub = 0.5x1 + 1.5 + x11 range: [0, 2] + + */ + + List expectedBounds( + { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, -0.5, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 2, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_sbt_abs_all_positive() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + 
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 : [11, 27] + x2.ub = 2x0 + 3x1 : [11, 27] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + Both absolute values positive, bound survive through activations: + + x4.lb = 2x0 + 3x1 : [11, 27] + x4.ub = 2x0 + 3x1 : [11, 27] + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = x0 + 2x1 : [6, 16] + x6.ub = x0 + 2x1 : [6, 16] + */ + + List expectedBounds( { + Tightening( 2, 11, Tightening::LB ), + Tightening( 2, 27, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 11, Tightening::LB ), + Tightening( 4, 27, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, 6, Tightening::LB ), + Tightening( 6, 16, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_sbt_abs_positive_and_negative() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 30 : [-19, 
-3] + x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First absolute value is negative, bounds get flipped + Second absolute value is positive, bounds surive the activation + + x4.lb = -2x0 -3x1 + 30 : [3, 19] + x4.ub = -2x0 -3x1 + 30 : [3, 19] + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - 3x0 - 4x1 + 30 : [-8, 14] + x6.ub = - 3x0 - 4x1 + 30 : [-8, 14] + */ + + List expectedBounds( { + Tightening( 2, -19, Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 19, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, 14, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_sbt_absolute_values_positive_and_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First absolute value is 
undecided, bounds are concretized. + Second absolute value is positive, bounds survive the activation + + x4 range: [0, 12] + x4.lb = 0 + x4.ub = 12 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 + 12 : [ 1, 7] + + x6 range: [-11, 7] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, 7, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_sbt_absolute_values_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed.
+ nlr.setBias( 1, 0, -15 ); + + // However, the weighted sum variable has been eliminated + nlr.eliminateVariable( 2, -3 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2 is eliminated, everything set to -3 + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + Second absolute value is positive, bounds survive the activation + + x4: all set to 3 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 + 3 : [-8, -2] + x6.ub = - x0 - x1 + 3 : [-8, -2] + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, -2, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_sbt_signs_positive_and_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); +
TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First sign is undecided, bounds are concretized. + LbOfUb = -4 < 0, UbOfLb = 12 > 0, using custom coefficients with alpha = { 0.5, 0.5 }. + Second sign is positive, bounds become constant 1 + Coefficient (first Sign, lower): 2/12 * 0.5 = 1/12. + Coefficient (first Sign, upper): -2/-4 * 0.5 = 1/4. + + x4 range: [-1, 1] + x4.lb = 1/12 ( 2x0 + 3x1 - 15 ) - 1 = 2/12 x0 + 3/12 x1 - 27/12 + x4.ub = 1/4 ( 2x0 + 3x1 - 15 ) + 1 = 0.5 x0 + 0.75x1 - 2.75 + + x5 range: [1, 1] + x5.lb = 1 + x5.ub = 1 + + Layer 2: + + x6.lb = 1 ( 2/12 x0 + 3/12 x1 - 27/12 ) - 1 ( 1 ) = 2/12 x0 + 3/12 x1 - 39/12 : + [-28/12 = -7/3, -1] + x6.ub = 1 ( 0.5 x0 + 0.75x1 - 2.75 ) - 1 ( 1 ) = 0.5 x0 + 0.75x1 - 3.75 : [-1, 3] + + x6 range: [-7/3, 3] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), + Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2.3333, Tightening::LB ), + Tightening( 6, 3, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_sbt_signs_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for
x2, which is node (1,0). Should make the node unfixed. + nlr.setBias( 1, 0, -15 ); + + // However, the weighted sum variable has been eliminated + nlr.eliminateVariable( 2, -3 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2 is eliminated, everything set to -3 + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First sign is negative, bounds become constant -1 + Second sign is positive, bounds become constant 1 + + x4: all set to -1 + + x5: all set to 1 + + Layer 2: + + x6.lb = 1 ( -1 ) - 1 ( 1 ) = -2 + x6.ub = 1 ( -1 ) - 1 ( 1 ) = -2 + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, -1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), + Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), + Tightening( 6, -2, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_sbt_leaky_relu() + { + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTLeakyReLU( nlr, tableau ); // alpha = 0.2 + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [-1, 1] + x1: [-1, 1] + + Layer 1: + + x2.lb = x0 + x1 : [-2, 2] + x2.ub = x0 + x1 : [-2, 2] + + x3.lb = x0 - x1 : [-2, 2] + x3.ub = x0 - x1 : [-2, 2] + + Both LeakyReLUs are undecided, bounds are concretized. Using custom lower coefficient with + alpha = { 0.5 }. + Lower Coefficient: ( 1 - 0.2 ) * 0.5 + 0.2 = 0.6 + Lower Bias: 0 + Upper Coefficient: ( 2 - 0.2*-2 )/( 2--2 ) = 2.4/4 = 0.6 + Upper Bias: ( 0.2 - 1 ) * 2 * -2 /( 2--2 ) = 0.8 + + x4.lb = 0.6 ( x0 + x1 ) = 0.6 x0 + 0.6x1 + x4.ub = 0.6 ( x0 + x1 ) + 0.8 = 0.6 x0 + 0.6 x1 + 0.8 + x4 range: [-0.4, 2] + + x5.lb = 0.6 ( x0 - x1 ) = 0.6 x0 - 0.6 x1 + x5.ub = 0.6 ( x0 - x1 ) + 0.8 = 0.6 x0 - 0.6 x1 + 0.8 + x5 range: [-0.4, 2] + + Layer 2: + + x6.lb = 1 ( 0.6x0 + 0.6x1 ) + 1 ( 0.6x0 - 0.6x1 ) = 1.2 x0 : [-1.2, 1.2] + x6.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) + 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 1.2 x0 + 1.6 : + [0.4, 2.8] x6 range: [-1.2, 2.8] + + x7.lb = 1 ( 0.6x0 + 0.6x1 ) - 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 1.2 x1 - 0.8 : [-2, 0.4] + x7.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) - 1 ( 0.6x0 - 0.6x1 ) = 1.2 x1 + 0.8 : [-0.4, 2] + x7 range: [-2, 2] + + Both LeakyReLUs are undecided, bounds are concretized. Using custom lower coefficient with + alpha = { 0.5 }. 
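+ The relaxation coefficients listed next can be double-checked with Python (a + sketch following the convention used elsewhere in this file; the helper + leaky_coeffs is ours, not part of the NLR code; slope 0.2 and alpha = 0.5 as above): + --- + a, alpha = 0.2, 0.5 + def leaky_coeffs(l, u): + lower_coeff = (1 - a) * alpha + a + upper_coeff = (u - a * l) / (u - l) + upper_bias = (a - 1) * u * l / (u - l) + return lower_coeff, upper_coeff, upper_bias + print(leaky_coeffs(-1.2, 2.8)) # first LeakyReLU: (0.6, 0.76, 0.672) + print(leaky_coeffs(-2.0, 2.0)) # second LeakyReLU: (0.6, 0.6, 0.8) + ---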
+ Lower Coefficient (first LeakyReLU): ( 1 - 0.2 ) * 0.5 + 0.2 = 0.6 + Lower Bias (first LeakyReLU): 0 + Upper Coefficient (first LeakyReLU): ( 2.8 - 0.2*-1.2 )/( 2.8--1.2 ) = 3.04/4 = 0.76 + Upper Bias (first LeakyReLU): ( 0.2 - 1 ) * 2.8 * -1.2 / ( 2.8--1.2 ) = 0.672 + + Lower Coefficient (second LeakyReLU): ( 1 - 0.2 ) * 0.5 + 0.2 = 0.6 + Lower Bias (second LeakyReLU): 0 + Upper Coefficient (second LeakyReLU): ( 2 - 0.2*-2 )/( 2--2 ) = 2.4/4 = 0.6 + Upper Bias (second LeakyReLU): ( 0.2 - 1 ) * 2 * -2 / ( 2--2 ) = 0.8 + + x8.lb = 0.6 ( 1.2x0 ) = 0.72 x0 + x8.ub = 0.76 ( 1.2x0 + 1.6 ) + 0.672 = 0.912 x0 + 1.888 + x8 range: [-0.24, 2.8] + + x9.lb = 0.6 ( 1.2x1 - 0.8 ) = 0.72 x1 - 0.48 + x9.ub = 0.6 ( 1.2x1 + 0.8 ) + 0.8 = 0.72 x1 + 1.28 + x9 range: [-0.4, 2] + + Layer 3: + + x10.lb = 1 ( 0.72 x0 ) + 1 ( 0.72 x1 - 0.48 ) + 1 = 0.72 x0 + 0.72 x1 + 0.52 : [-0.92, 1.96] + x10.ub = 1 ( 0.912 x0 + 1.888 ) + 1 ( 0.72 x1 + 1.28 ) + 1 = 0.912 x0 + 0.72 x1 + 4.168 + : [2.536, 5.8] + x10 range: [-0.92, 5.8] + + x11.lb = 0.72 x1 - 0.48 : [-1.2, 0.24] + x11.ub = 0.72 x1 + 1.28 : [-0.56, 2] + x11 range: [-1.2, 2] + + */ + + List expectedBounds( + { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -0.4, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -0.4, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1.2, Tightening::LB ), Tightening( 6, 2.8, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -0.24, Tightening::LB ), Tightening( 8, 2.8, Tightening::UB ), + Tightening( 9, -0.4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, -0.92, Tightening::LB ), Tightening( 10, 5.8, Tightening::UB ), + Tightening( 11, -1.2, Tightening::LB ), Tightening( 11, 2, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_sbt_sigmoids_and_round() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSigmoidsAndRound( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + // Layer 1 + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 0 ), -2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 0 ), 2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 1 ), -2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 1 ), 2, 0.00001 ) ); + + // Layer 2 + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 0 ), 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 0 ), 0.8807, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 1 ), 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 1 ), 0.8807, 0.0001 ) ); + + // Layer
3 + /* + Double-check with Python + --- + from math import exp as e + def g(x): + return 1 / (1 + e(-x)) + + def g_prime(x): + return g(x) * (1 - g(x)) + + def lam(l, u): + return (g(u) - g(l)) / (u - l) + + def lam_prime(l, u): + return min(g_prime(l), g_prime(u)) + + l3 = l4 = -2 + u3 = u4 = 2 + l5 = l6 = g(-2) + u5 = u6 = g(2) + lambda7 = lam(l3, u3) + lambda7_prime = lam_prime(l3, u3) + lambda8 = lam(l4, u4) + lambda8_prime = lam_prime(l4, u4) + x7_l = lambda7_prime * (-2) + g(-2) + g(-2) - lambda7_prime * (-2 + -2) + x7_u = lambda7_prime * (2) + g(2) + g(2) -lambda7_prime * (2 + 2) + x8_l = lambda8_prime * (-2) + g(-2) - g(2) - lambda8_prime * (-2 - 2) + x8_u = lambda8_prime * (2) + g(2) - g(-2) -lambda8_prime * (2 - -2) + print(x7_l) + print(x7_u) + print(x8_l) + print(x8_u) + --- + [output]: + 0.4483930148512481 + 1.5516069851487517 + -0.5516069851487517 + 0.5516069851487517 + */ + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 0 ), 0.4483, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 0 ), 1.5516, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 1 ), -0.5516, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 1 ), 0.5516, 0.0001 ) ); + + // Layer 4 + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 0 ), 0 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 0 ), 2 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 1 ), -1 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 1 ), 1 ); + } + + void test_parameterised_sbt_max_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTMax( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [-1, 1] + x1: [-1, 2] + + Layer 1: + + x2.lb = x0 + x1 : [-2, 3] + x2.ub = x0 + x1 : [-2, 3] + + x3.lb = x0 - x1 : [-3, 2] + x3.ub = x0 - x1 : [-3, 2] + + Both ReLUs are undecided, bounds are concretized. LbOfUb > 0, using standard ReLU lower + coefficient. Coefficient (first ReLU): 3/( 3--2 ) = 3/5 = 0.6 Coefficient (second ReLU): + 2/( 2--3 ) = 2/5 = 0.4 + + x4.lb = 0.6 ( x0 + x1 ) = 0.6x0 + 0.6x1 + x4.ub = 0.6 ( x0 + x1 ) + 1.2 = 0.6x0 + 0.6x1 + 1.2 + x4 range: [0, 3] + + x5.lb = 0.4 ( x0 - x1 ) = 0.4x0 - 0.4x1 + x5.ub = 0.4 ( x0 - x1 ) + 1.2 = 0.4x0 - 0.4x1 + 1.2 + x5 range: [0, 2] + + Max is not fixed because x5.lb <= x4.ub and x4.lb <= x5.ub + Max inherits lower bound from x4, and its upper bound is constant 3.
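+ + Double-check with Python (a sketch of the fixedness test, using the concrete + ranges just derived; the tuple names are ours): + --- + x4 = (0, 3) + x5 = (0, 2) + print(x5[0] <= x4[1] and x4[0] <= x5[1]) # True: the Max is not fixed + print(max(x4[1], x5[1])) # 3: the constant upper bound used below + ---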
+ + x6.lb = 0.6x0 + 0.6x1 : [-1.2, 1.8] + x6.ub = 3 : [3, 3] + x6 range: [-1.2, 3] + + Layer 3: + + x7.lb = 2 ( 0.6x0 + 0.6x1 ) = 1.2x0 + 1.2x1 : [-2.4, 3.6] + x7.ub = 2 ( 3 ) = 6 : [6, 6] + x7 range: [-2.4, 6] + */ + + List expectedBounds( { + Tightening( 2, -2, Tightening::LB ), + Tightening( 2, 3, Tightening::UB ), + Tightening( 3, -3, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), + Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -2.4, Tightening::LB ), + Tightening( 7, 6, Tightening::UB ), + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_sbt_max_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTMax( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -3 ); + tableau.setUpperBound( 1, -2 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [1, 2] + x1: [-3, -2] + + Layer 1: + + x2.lb = x0 + x1 : [-2, 0] + x2.ub = x0 + x1 : [-2, 0] + + x3.lb = x0 - x1 : [3, 5] + x3.ub = x0 - x1 : [3, 5] + + First ReLU is negative, bounds become constant 0 + Second ReLU is positive, bounds survive the activation + + x4: all set to 0 + + x5.lb = x0 - x1 : [3, 5] + x5.ub = x0 - x1 : [3, 5] + + Max is fixed because x5.lb > x4.ub, it inherits x5's bounds + + x6.lb = x0 - x1 : [3, 5] + x6.ub = x0 - x1 : [3, 5] + + Layer 3: + + x7.lb = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10] + x7.ub = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10] + */ + + List expectedBounds( { + Tightening( 2, -2, Tightening::LB ), + Tightening( 2, 0, Tightening::UB ), + Tightening( 3, 3, Tightening::LB ), + Tightening( 3, 5, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 3, Tightening::LB ), + Tightening( 5, 5, Tightening::UB ), + Tightening( 6, 3, Tightening::LB ), + Tightening( 6, 5, Tightening::UB ), + Tightening( 7, 6, Tightening::LB ), + Tightening( 7, 10, Tightening::UB ), + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_sbt_softmax1() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + } + + void test_parameterised_sbt_softmax2() + { +
Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + { + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.000001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.000001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.000001 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [1, 1.000001] + x1: [1, 1.000001] + x2: [1, 1.000001] + */ + List expectedBounds( { Tightening( 3, 2, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 0, Tightening::UB ), + Tightening( 6, 0.2595, Tightening::LB ), + Tightening( 6, 0.2595, Tightening::UB ), + Tightening( 7, 0.7054, Tightening::LB ), + Tightening( 7, 0.7054, Tightening::UB ), + Tightening( 8, 0.0351, Tightening::LB ), + Tightening( 8, 0.0351, Tightening::UB ), + Tightening( 9, 1, Tightening::LB ), + Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), + Tightening( 10, -1, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + { + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "er" ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.000001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.000001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.000001 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [1, 1.000001] + x1: [1, 1.000001] + x2: [1, 1.000001] + */ + List expectedBounds( { Tightening( 3, 2, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 0, Tightening::UB ), + Tightening( 6, 0.2595, Tightening::LB ), + Tightening( 6, 0.2595, Tightening::UB ), + Tightening( 7, 0.7054, Tightening::LB ), + Tightening( 7, 0.7054, Tightening::UB ), + Tightening( 8, 0.0351, Tightening::LB ), + Tightening( 8, 0.0351, Tightening::UB ), + Tightening( 9, 1, Tightening::LB ), + Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), + Tightening( 10, -1, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + } + + void test_parameterised_sbt_softmax3() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); +
populateNetworkSBTSoftmax2( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.00001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.00001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.00001 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [1, 1.00001] + x1: [1, 1.00001] + x2: [1, 1.00001] + */ + + List expectedBounds( + { Tightening( 3, 2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, -1, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, -2, Tightening::UB ), + Tightening( 8, 0.8668, Tightening::LB ), Tightening( 8, 0.8668, Tightening::UB ), + Tightening( 9, 0.9820, Tightening::LB ), Tightening( 9, 0.9820, Tightening::UB ), + Tightening( 10, 0.1173, Tightening::LB ), Tightening( 10, 0.1173, Tightening::UB ), + Tightening( 11, 0.0179, Tightening::LB ), Tightening( 11, 0.0179, Tightening::UB ), + Tightening( 12, 0.0159, Tightening::LB ), Tightening( 12, 0.0159, Tightening::UB ), + Tightening( 13, 0.9470, Tightening::LB ), Tightening( 13, 0.9470, Tightening::UB ), + Tightening( 14, -0.9470, Tightening::LB ), Tightening( 14, -0.9470, Tightening::UB ), + Tightening( 15, 1.0253, Tightening::LB ), Tightening( 15, 1.0253, Tightening::UB ), + Tightening( 16, -1.0253, Tightening::LB ), Tightening( 16, -1.0253, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_parameterised_sbt_bilinear() + { + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTBilinear( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -2 ); + tableau.setUpperBound( 1, 1 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedSymbolicBoundPropagation( coeffs ) ); + + /* + Input ranges: + + x0: [1, 2] + x1: [-2, 1] + + Layer 1: + + x2.lb = x0 - 2x1 : [-1, 6] + x2.ub = x0 - 2x1 : [-1, 6] + + x3.lb = x0 + x1 : [-1, 3] + x3.ub = x0 + x1 : [-1, 3] + + Using custom coefficients with alpha = { 0.5, 0.5 }. + Coefficients for bilinear layer: + Lower bound: + alpha_l = 0.5 x3.lb + ( 1 - 0.5 ) x3.ub = 0.5 * -1 + 0.5 * 3 = 1 + beta_l = 0.5 x2.lb + ( 1 - 0.5 ) x2.ub = 0.5 * -1 + 0.5 * 6 = 2.5 + gamma_l = -0.5 x2.lb x3.lb - ( 1 - 0.5 ) x2.ub x3.ub = -0.5 * -1 * -1 - 0.5 * 6 * 3 = + -9.5. + + Upper bound: + alpha_u = 0.5 x3.ub + ( 1 - 0.5 ) x3.lb = 0.5 * 3 + 0.5 * -1 = 1 + beta_u = 0.5 x2.lb + ( 1 - 0.5 ) x2.ub = 0.5 * -1 + 0.5 * 6 = 2.5 + gamma_u = -0.5 x2.lb x3.ub - ( 1 - 0.5 ) x2.ub x3.lb = -0.5 * -1 * 3 - 0.5 * 6 * -1 + = 4.5.
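+ + Double-check with Python (a sketch; the variable names are ours, not the + implementation's): + --- + t = 0.5 + l2, u2 = -1, 6 + l3, u3 = -1, 3 + print(t * l3 + (1 - t) * u3) # alpha_l = 1.0 + print(t * l2 + (1 - t) * u2) # beta_l = 2.5 + print(-t * l2 * l3 - (1 - t) * u2 * u3) # gamma_l = -9.5 + print(t * u3 + (1 - t) * l3) # alpha_u = 1.0 + print(t * l2 + (1 - t) * u2) # beta_u = 2.5 + print(-t * l2 * u3 - (1 - t) * u2 * l3) # gamma_u = 4.5 + ---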
+ + x4.lb = 1 ( x0 - 2x1 ) + 2.5 ( x0 + x1 ) - 9.5 = 3.5 x0 + 0.5 x1 - 9.5 : [-7, -2] + x4.ub = 1 ( x0 - 2x1 ) + 2.5 ( x0 + x1 ) + 4.5 = 3.5 x0 + 0.5 x1 + 4.5 : [7, 12] + x4 range: [-7, 12] + + Layer 3: + + x5.lb = -1 ( 3.5 x0 + 0.5 x1 + 4.5 ) = -3.5 x0 - 0.5 x1 - 4.5 : [-12, -7] + x5.ub = -1 ( 3.5 x0 + 0.5 x1 - 9.5 ) = -3.5 x0 - 0.5 x1 + 9.5 : [2, 7] + x5 range: [-12, 7] + */ + + List expectedBounds( { Tightening( 2, -1, Tightening::LB ), + Tightening( 2, 6, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 3, Tightening::UB ), + Tightening( 4, -7, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, -12, Tightening::LB ), + Tightening( 5, 7, Tightening::UB ) } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + void test_concretize_input_assignment() { NLR::NetworkLevelReasoner nlr; diff --git a/src/nlr/tests/Test_PMNR.h b/src/nlr/tests/Test_PMNR.h deleted file mode 100644 index ce4e723758..0000000000 --- a/src/nlr/tests/Test_PMNR.h +++ /dev/null @@ -1,11387 +0,0 @@ -/********************* */ -/*! \file Test_LPRelaxation.h - ** \verbatim - ** Top contributors (to current version): - ** Guy Katz, Andrew Wu - ** This file is part of the Marabou project. - ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS - ** in the top-level source directory) and their institutional affiliations. - ** All rights reserved. See the file COPYING in the top-level source - ** directory for licensing information.\endverbatim - ** - ** [[ Add lengthier description here ]] - -**/ - -#include "../../engine/tests/MockTableau.h" // TODO: fix this -#include "DeepPolySoftmaxElement.h" -#include "FloatUtils.h" -#include "Layer.h" -#include "NetworkLevelReasoner.h" -#include "Options.h" -#include "Query.h" -#include "Tightening.h" -#include "Vector.h" - -#include - -class MockForNetworkLevelReasoner -{ -public: -}; - -class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -{ -public: - MockForNetworkLevelReasoner *mock; - - void setUp() - { - TS_ASSERT( mock = new MockForNetworkLevelReasoner ); - } - - void tearDown() - { - TS_ASSERT_THROWS_NOTHING( delete mock ); - } - - void populateNetworkBackwardReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 R -1 R -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ R / \ R / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig.
2 of - https://dl.acm.org/doi/10.1145/3563325 - using ReLU activation instead of LeakyReLU - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::RELU, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardReLU2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = ReLU( x3 ) - x8 = ReLU( x4 ) - x9 = ReLU( x5 ) - x10 = ReLU( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = ReLU( x11 ) - x15 = ReLU( x12 ) - x16 = ReLU( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = ReLU( x17 ) - x20 = ReLU( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, 
NLR::Layer::RELU, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::RELU, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::RELU, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - 
tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardSigmoid( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 S -1 S -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ S / \ S / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 2 of - https://dl.acm.org/doi/10.1145/3563325 - using Sigmoid activation instead of LeakyReLU - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::SIGMOID, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the Sigmoid sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - 
tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardSigmoid2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = Sigmoid( x3 ) - x8 = Sigmoid( x4 ) - x9 = Sigmoid( x5 ) - x10 = Sigmoid( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = Sigmoid( x11 ) - x15 = Sigmoid( x12 ) - x16 = Sigmoid( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = Sigmoid( x17 ) - x20 = Sigmoid( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::SIGMOID, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::SIGMOID, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::SIGMOID, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the Sigmoid sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( 
NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardSign( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 Sign -1 Sign -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ Sign / \ Sign / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 
2 of - https://dl.acm.org/doi/10.1145/3563325 - using Sign activation instead of LeakyReLU - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardSign2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = Sign( x3 ) - x8 = Sign( x4 ) - x9 = SIgn( x5 ) - x10 = Sign( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = Sign( x11 ) - x15 = Sign( x12 ) - x16 = Sign( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = Sign( x17 ) - x20 = Sign( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, 
NLR::Layer::SIGN, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::SIGN, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - 
tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardRound( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 Rnd -1 Rnd -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ Rnd / \ Rnd / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 2 of - https://dl.acm.org/doi/10.1145/3563325 - using Round activation instead of LeakyReLU - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ROUND, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the Round sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - 
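// The ROUND layers above are assumed to follow Marabou's Round semantics,
- // mapping each source neuron to its nearest integer; e.g. a source interval
- // of [0.4, 1.6] would tighten to [0, 2] after rounding.
-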
tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardRound2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = Round( x3 ) - x8 = Round( x4 ) - x9 = Round( x5 ) - x10 = Round( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = Round( x11 ) - x15 = Round( x12 ) - x16 = Round( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = Round( x17 ) - x20 = Round( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::ROUND, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::ROUND, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::ROUND, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the Round sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); 
- - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardAbs( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 A -1 A -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ A / \ A / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 
2 of - https://dl.acm.org/doi/10.1145/3563325 - using Absolute value activation instead of LeakyReLU - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardAbs2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = Abs( x3 ) - x8 = Abs( x4 ) - x9 = Abs( x5 ) - x10 = Abs( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = Abs( x11 ) - x15 = Abs( x12 ) - x16 = Abs( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = Abs( x17 ) - x20 = Abs( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - 
nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, 
-large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardLeakyReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 LR -1 LR -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ LR / \ LR / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 2 of - https://dl.acm.org/doi/10.1145/3563325 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - nlr.getLayer( 2 )->setAlpha( 0.1 ); - nlr.getLayer( 4 )->setAlpha( 0.1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the LeakyReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for 
neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardLeakyRelu2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = LeakyReLU( x3 ) - x8 = LeakyReLU( x4 ) - x9 = LeakyReLU( x5 ) - x10 = LeakyReLU( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = LeakyReLU( x11 ) - x15 = LeakyReLU( x12 ) - x16 = LeakyReLU( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = LeakyReLU( x17 ) - x20 = LeakyReLU( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::LEAKY_RELU, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - nlr.getLayer( 2 )->setAlpha( 0.1 ); - nlr.getLayer( 4 )->setAlpha( 0.1 ); - nlr.getLayer( 6 )->setAlpha( 0.1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the LeakyReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - 
nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr, - MockTableau &tableau ) - { - /* - a a' - x e - b b' f h - y d - c c' - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::MAX, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the 
weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Softmax/Max sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 0, 2, 1 ); - nlr.addActivationSource( 1, 0, 2, 2 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 1, 2, 2 ); - nlr.addActivationSource( 1, 2, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - - void populateNetworkBackwardSoftmaxAndMax2( NLR::NetworkLevelReasoner &nlr, - MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 - x1 x12 x15 x17 - x5 x9 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7, x8, x9, x10 = Softmax( x2, x3, x4, x5 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14, x15, x16 = Softmax( x11, x12, x13 ) - - x17 = Max( x14, x15, x16 ) - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::SOFTMAX, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::SOFTMAX, 3 ); - nlr.addLayer( 5, NLR::Layer::MAX, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); 
- nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - - // Mark the Softmax/Max sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 0 ); - nlr.addActivationSource( 1, 3, 2, 0 ); - nlr.addActivationSource( 1, 0, 2, 1 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 3, 2, 1 ); - nlr.addActivationSource( 1, 0, 2, 2 ); - nlr.addActivationSource( 1, 1, 2, 2 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 2 ); - nlr.addActivationSource( 1, 0, 2, 3 ); - nlr.addActivationSource( 1, 1, 2, 3 ); - nlr.addActivationSource( 1, 2, 2, 3 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - nlr.addActivationSource( 3, 2, 4, 0 ); - nlr.addActivationSource( 3, 0, 4, 1 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 1 ); - nlr.addActivationSource( 3, 0, 4, 2 ); - nlr.addActivationSource( 3, 1, 4, 2 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 4, 0, 5, 0 ); - nlr.addActivationSource( 4, 1, 5, 0 ); - nlr.addActivationSource( 4, 2, 5, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 18 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 
10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - } - - void populateNetworkBackwardReluAndBilinear( NLR::NetworkLevelReasoner &nlr, - MockTableau &tableau ) - { - /* - a a' - x e - b b' f h - y d - c c' - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the ReLU/Bilinear sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardReluAndBilinear2( 
NLR::NetworkLevelReasoner &nlr, - MockTableau &tableau ) - { - /* - x3 x7 - x0 - x4 x8 x11 x13 - x1 x15 - x5 x9 x12 x14 - x2 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = ReLU( x3 ) - x8 = ReLU( x4 ) - x9 = ReLU( x5 ) - x10 = ReLU( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - - x13 = ReLU( x11 ) - x14 = ReLU( x12 ) - - x15 = x13 * x14 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::RELU, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::BILINEAR, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setBias( 3, 0, 2 ); - - // Mark the ReLU/Bilinear sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - nlr.addActivationSource( 4, 0, 5, 0 ); - nlr.addActivationSource( 4, 1, 5, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 13 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 14 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 15 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 16 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 
11, large );
- tableau.setLowerBound( 12, -large );
- tableau.setUpperBound( 12, large );
- tableau.setLowerBound( 13, -large );
- tableau.setUpperBound( 13, large );
- tableau.setLowerBound( 14, -large );
- tableau.setUpperBound( 14, large );
- tableau.setLowerBound( 15, -large );
- tableau.setUpperBound( 15, large );
- }
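-
- // The test_invprop_* cases below all follow the same pattern: run the
- // DeepPoly forward analysis ( obtainCurrentBounds + deepPolyPropagation ),
- // compare the resulting tightenings against the expected list, write them
- // back into the tableau, and then invoke backward INVPROP
- // ( lpRelaxationPropagation ) to check the additional tightenings it derives.
- // boundsEqual and updateTableau are assumed to be the helper utilities
- // defined elsewhere in this test suite.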
-
- void test_invprop_relu()
- {
- Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
- Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" );
-
- NLR::NetworkLevelReasoner nlr;
- MockTableau tableau;
- nlr.setTableau( &tableau );
- populateNetworkBackwardReLU( nlr, tableau );
-
- tableau.setLowerBound( 0, 0 );
- tableau.setUpperBound( 0, 1 );
- tableau.setLowerBound( 1, 0 );
- tableau.setUpperBound( 1, 1 );
-
- // Invoke DeepPoly
- TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
- TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
- List<Tightening> expectedBounds( {
- Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
- Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
- Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
- Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
-
- Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
- Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-
- Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ),
- Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-
- Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ),
- Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ),
- } );
-
- List<Tightening> bounds;
- TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
- TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
- // Invoke Invprop
- TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
- TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
- TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
- List<Tightening> expectedBounds2( {
- Tightening( 8, 0, Tightening::LB ),
- } );
-
- TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
- TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
- // Change the current bounds
- tableau.setLowerBound( 0, -3 );
- tableau.setUpperBound( 0, 1 );
- tableau.setLowerBound( 1, -1 );
- tableau.setUpperBound( 1, 2 );
-
- double large = 1000000;
- tableau.setLowerBound( 2, -large );
- tableau.setUpperBound( 2, large );
- tableau.setLowerBound( 3, -large );
- tableau.setUpperBound( 3, large );
- tableau.setLowerBound( 4, -large );
- tableau.setUpperBound( 4, large );
- tableau.setLowerBound( 5, -large );
- tableau.setUpperBound( 5, large );
- tableau.setLowerBound( 6, -large );
- tableau.setUpperBound( 6, large );
- tableau.setLowerBound( 7, -large );
- tableau.setUpperBound( 7, large );
- tableau.setLowerBound( 8, -large );
- tableau.setUpperBound( 8, large );
- tableau.setLowerBound( 9, -large );
- tableau.setUpperBound( 9, large );
- tableau.setLowerBound( 10, -large );
- tableau.setUpperBound( 10, large );
- tableau.setLowerBound( 11, -large );
- tableau.setUpperBound( 11, large );
-
-
- // Invoke DeepPoly
- TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
- TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
- List<Tightening> expectedBounds3( {
- Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
- Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
- Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
- Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-
- Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ),
- Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ),
-
- Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
- Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ),
-
- Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ),
- Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ),
- } );
-
- TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
- TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
- // Invoke Invprop
- TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
- TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
- TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
- List<Tightening> expectedBounds4( {
- Tightening( 8, 0, Tightening::LB ),
- Tightening( 9, 0, Tightening::LB ),
- } );
-
- TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
- TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
- }
-
- void test_invprop_relu2()
- {
- Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
- Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" );
-
- NLR::NetworkLevelReasoner nlr;
- MockTableau tableau;
- nlr.setTableau( &tableau );
- populateNetworkBackwardReLU2( nlr, tableau );
-
- tableau.setLowerBound( 0, -1 );
- tableau.setUpperBound( 0, 1 );
- tableau.setLowerBound( 1, -1 );
- tableau.setUpperBound( 1, 1 );
- tableau.setLowerBound( 2, -1 );
- tableau.setUpperBound( 2, 1 );
-
-
- // Invoke DeepPoly
- TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
- TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
- List<Tightening> expectedBounds( {
- Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
- Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
- Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
- Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
- Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
- Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
- Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
- Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-
- Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ),
- Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ),
- Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ),
-
- Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ),
- Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ),
- Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ),
-
- Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ),
- Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ),
-
- Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ),
- Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ),
-
- Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ),
- } );
-
- List<Tightening> bounds;
- TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
- TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
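- // The backward pass is expected to recover the non-negativity of the ReLU
- // outputs x14, x15, x16 and x19, which the forward pass above left with
- // negative lower bounds.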
- // Invoke Invprop
- TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
- TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
- TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
- List<Tightening> expectedBounds2( {
- Tightening( 14, 0, Tightening::LB ),
- Tightening( 15, 0, Tightening::LB ),
- Tightening( 16, 0, Tightening::LB ),
-
- Tightening( 19, 0, Tightening::LB ),
- } );
-
- TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
- TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
- // Change the current bounds
- tableau.setLowerBound( 0, -3 );
- tableau.setUpperBound( 0, 1 );
- tableau.setLowerBound( 1, -1 );
- tableau.setUpperBound( 1, 2 );
- tableau.setLowerBound( 2, -2 );
- tableau.setUpperBound( 2, 2 );
-
- double large = 1000000;
- tableau.setLowerBound( 3, -large );
- tableau.setUpperBound( 3, large );
- tableau.setLowerBound( 4, -large );
- tableau.setUpperBound( 4, large );
- tableau.setLowerBound( 5, -large );
- tableau.setUpperBound( 5, large );
- tableau.setLowerBound( 6, -large );
- tableau.setUpperBound( 6, large );
- tableau.setLowerBound( 7, -large );
- tableau.setUpperBound( 7, large );
- tableau.setLowerBound( 8, -large );
- tableau.setUpperBound( 8, large );
- tableau.setLowerBound( 9, -large );
- tableau.setUpperBound( 9, large );
- tableau.setLowerBound( 10, -large );
- tableau.setUpperBound( 10, large );
- tableau.setLowerBound( 11, -large );
- tableau.setUpperBound( 11, large );
- tableau.setLowerBound( 12, -large );
- tableau.setUpperBound( 12, large );
- tableau.setLowerBound( 13, -large );
- tableau.setUpperBound( 13, large );
- tableau.setLowerBound( 14, -large );
- tableau.setUpperBound( 14, large );
- tableau.setLowerBound( 15, -large );
- tableau.setUpperBound( 15, large );
- tableau.setLowerBound( 16, -large );
- tableau.setUpperBound( 16, large );
- tableau.setLowerBound( 17, -large );
- tableau.setUpperBound( 17, large );
- tableau.setLowerBound( 18, -large );
- tableau.setUpperBound( 18, large );
- tableau.setLowerBound( 19, -large );
- tableau.setUpperBound( 19, large );
- tableau.setLowerBound( 20, -large );
- tableau.setUpperBound( 20, large );
- tableau.setLowerBound( 21, -large );
- tableau.setUpperBound( 21, large );
-
-
- // Invoke DeepPoly
- TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
- TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
- List<Tightening> expectedBounds3( {
- Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
- Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
- Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
- Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
- Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
- Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
- Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
- Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-
- Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ),
- Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ),
- Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ),
-
- Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ),
- Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ),
- Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ),
-
- Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ),
- Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ),
-
- Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ),
- Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ),
-
- Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ),
- } );
-
- TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
- TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
- // Invoke Invprop
- TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
- TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
- TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
- List<Tightening> expectedBounds4( {
- Tightening( 7, 0, Tightening::LB ),
-
- Tightening( 14, 0, Tightening::LB ),
- Tightening( 15, 0, Tightening::LB ),
- Tightening( 16, 0, Tightening::LB ),
-
- Tightening( 20, 0, Tightening::LB ),
- } );
-
- TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
- TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
- }
-
- void test_invprop_sigmoid()
- {
- Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
- Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" );
-
- NLR::NetworkLevelReasoner nlr;
- MockTableau tableau;
- nlr.setTableau( &tableau );
- populateNetworkBackwardSigmoid( nlr, tableau );
-
- tableau.setLowerBound( 0, 0 );
- tableau.setUpperBound( 0, 1 );
- tableau.setLowerBound( 1, 0 );
- tableau.setUpperBound( 1, 1 );
-
-
- // Invoke DeepPoly
- TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
- TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
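- // A quick sanity check on the values below, assuming x4 = Sigmoid( x2 ) as
- // in the other backward networks: with x2 in [-1, 1], sigmoid( -1 ) ~ 0.2689
- // and sigmoid( 1 ) ~ 0.7311, matching the expected bounds for x4.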
- List<Tightening> expectedBounds( {
- Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
- Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
- Tightening( 4, 0.2689, Tightening::LB ), Tightening( 4, 0.7311, Tightening::UB ),
- Tightening( 5, 0.5, Tightening::LB ), Tightening( 5, 0.8808, Tightening::UB ),
-
- Tightening( 6, -0.1261, Tightening::LB ), Tightening( 6, 0.5069, Tightening::UB ),
- Tightening( 7, -0.2379, Tightening::LB ), Tightening( 7, 0.8571, Tightening::UB ),
-
- Tightening( 8, 0.4685, Tightening::LB ), Tightening( 8, 0.6241, Tightening::UB ),
- Tightening( 9, 0.4408, Tightening::LB ), Tightening( 9, 0.7021, Tightening::UB ),
-
- Tightening( 10, -1.1819, Tightening::LB ), Tightening( 10, -1.0535, Tightening::UB ),
- Tightening( 11, 3.4986, Tightening::LB ), Tightening( 11, 3.8797, Tightening::UB ),
- } );
-
- List<Tightening> bounds;
- TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
- TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
- // Invoke Invprop
- TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
- TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
- TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
- List<Tightening> expectedBounds2( {} );
-
- TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
- TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
- // Change the current bounds
- tableau.setLowerBound( 0, -3 );
- tableau.setUpperBound( 0, 1 );
- tableau.setLowerBound( 1, -1 );
- tableau.setUpperBound( 1, 2 );
-
- double large = 1000000;
- tableau.setLowerBound( 2, -large );
- tableau.setUpperBound( 2, large );
- tableau.setLowerBound( 3, -large );
- tableau.setUpperBound( 3, large );
- tableau.setLowerBound( 4, -large );
- tableau.setUpperBound( 4, large );
- tableau.setLowerBound( 5, -large );
- tableau.setUpperBound( 5, large );
- tableau.setLowerBound( 6, -large );
- tableau.setUpperBound( 6, large );
- tableau.setLowerBound( 7, -large );
- tableau.setUpperBound( 7, large );
- tableau.setLowerBound( 8, -large );
- tableau.setUpperBound( 8, large );
- tableau.setLowerBound( 9, -large );
- tableau.setUpperBound( 9, large );
- tableau.setLowerBound( 10, -large );
- tableau.setUpperBound( 10, large );
- tableau.setLowerBound( 11, -large );
- tableau.setUpperBound( 11, large );
-
-
- // Invoke DeepPoly
- TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
- TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
- List<Tightening> expectedBounds3( {
- Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
- Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
- Tightening( 4, 0.0066, Tightening::LB ), Tightening( 4, 0.8807, Tightening::UB ),
- Tightening( 5, 0.0179, Tightening::LB ), Tightening( 5, 0.9526, Tightening::UB ),
-
- Tightening( 6, -0.8362, Tightening::LB ), Tightening( 6, 0.9193, Tightening::UB ),
- Tightening( 7, -0.8860, Tightening::LB ), Tightening( 7, 1.6904, Tightening::UB ),
-
- Tightening( 8, 0.3023, Tightening::LB ), Tightening( 8, 0.7148, Tightening::UB ),
- Tightening( 9, 0.2919, Tightening::LB ), Tightening( 9, 0.8443, Tightening::UB ),
-
- Tightening( 10, -1.2694, Tightening::LB ), Tightening( 10, -0.8841, Tightening::UB ),
- Tightening( 11, 3.2396, Tightening::LB ), Tightening( 11, 4.05, Tightening::UB ),
- } );
-
- TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
- TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
- // Invoke Invprop
- TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
- TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
- TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
- List<Tightening> expectedBounds4( {} );
-
- TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
- TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
- }
-
- void test_invprop_sigmoid2()
- {
- Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
- Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" );
-
- NLR::NetworkLevelReasoner nlr;
- MockTableau tableau;
- nlr.setTableau( &tableau );
- populateNetworkBackwardSigmoid2( nlr, tableau );
-
- tableau.setLowerBound( 0, -1 );
- tableau.setUpperBound( 0, 1 );
- tableau.setLowerBound( 1, -1 );
- tableau.setUpperBound( 1, 1 );
- tableau.setLowerBound( 2, -1 );
- tableau.setUpperBound( 2, 1 );
-
-
- // Invoke DeepPoly
- TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
- TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
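- // Similarly, assuming x7 = Sigmoid( x3 ) here, mirroring the other backward
- // networks: with x3 in [-2, 2], sigmoid( -2 ) ~ 0.1192 and
- // sigmoid( 2 ) ~ 0.8808, matching the expected bounds for x7 below.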
-2.3771, Tightening::LB ), Tightening( 13, 0.3771, Tightening::UB ), - - Tightening( 14, 0.7988, Tightening::LB ), Tightening( 14, 0.9903, Tightening::UB ), - Tightening( 15, 0.3627, Tightening::LB ), Tightening( 15, 0.9285, Tightening::UB ), - Tightening( 16, 0.0849, Tightening::LB ), Tightening( 16, 0.5932, Tightening::UB ), - - Tightening( 17, -1.2113, Tightening::LB ), Tightening( 17, 0.0354, Tightening::UB ), - Tightening( 18, 1.4027, Tightening::LB ), Tightening( 18, 2.4177, Tightening::UB ), - - Tightening( 19, 0.2295, Tightening::LB ), Tightening( 19, 0.5088, Tightening::UB ), - Tightening( 20, 0.8026, Tightening::LB ), Tightening( 20, 0.9182, Tightening::UB ), - - Tightening( 21, -1.6539, Tightening::LB ), Tightening( 21, -1.3393, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.9933, Tightening::UB ), - 
Tightening( 8, 0.0067, Tightening::LB ), Tightening( 8, 0.9933, Tightening::UB ), - Tightening( 9, 0.0025, Tightening::LB ), Tightening( 9, 0.9933, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9991, Tightening::UB ), - - Tightening( 11, 1.2517, Tightening::LB ), Tightening( 11, 4.9701, Tightening::UB ), - Tightening( 12, -0.9599, Tightening::LB ), Tightening( 12, 2.8466, Tightening::UB ), - Tightening( 13, -2.8147, Tightening::LB ), Tightening( 13, 0.9188, Tightening::UB ), - - Tightening( 14, 0.7776, Tightening::LB ), Tightening( 14, 0.9931, Tightening::UB ), - Tightening( 15, 0.2769, Tightening::LB ), Tightening( 15, 0.9451, Tightening::UB ), - Tightening( 16, 0.0565, Tightening::LB ), Tightening( 16, 0.7148, Tightening::UB ), - - Tightening( 17, -1.4307, Tightening::LB ), Tightening( 17, 0.1083, Tightening::UB ), - Tightening( 18, 1.2592, Tightening::LB ), Tightening( 18, 2.5519, Tightening::UB ), - - Tightening( 19, 0.1929, Tightening::LB ), Tightening( 19, 0.5270, Tightening::UB ), - Tightening( 20, 0.7789, Tightening::LB ), Tightening( 20, 0.9277, Tightening::UB ), - - Tightening( 21, -1.6967, Tightening::LB ), Tightening( 21, -1.3115, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_invprop_abs() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardAbs( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), - - Tightening( 10, -4, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, 2, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( 
bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 4, Tightening::UB ), - - Tightening( 6, -5, Tightening::LB ), Tightening( 6, 4, Tightening::UB ), - Tightening( 7, -4, Tightening::LB ), Tightening( 7, 10, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 10, Tightening::UB ), - - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, 2, Tightening::LB ), Tightening( 11, 27, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_invprop_abs2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardAbs2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, 
0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -4, Tightening::LB ), Tightening( 11, 9, Tightening::UB ), - Tightening( 12, -2, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), - Tightening( 13, -5, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), - - Tightening( 14, 0, Tightening::LB ), Tightening( 14, 9, Tightening::UB ), - Tightening( 15, 0, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), - Tightening( 16, 0, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), - - Tightening( 17, -15, Tightening::LB ), Tightening( 17, 12, Tightening::UB ), - Tightening( 18, 0, Tightening::LB ), Tightening( 18, 27, Tightening::UB ), - - Tightening( 19, 0, Tightening::LB ), Tightening( 19, 15, Tightening::UB ), - Tightening( 20, 0, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), - - Tightening( 21, -28, Tightening::LB ), Tightening( 21, 14, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, 
-5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 15, Tightening::UB ), - - Tightening( 11, -13, Tightening::LB ), Tightening( 11, 18, Tightening::UB ), - Tightening( 12, -5, Tightening::LB ), Tightening( 12, 25, Tightening::UB ), - Tightening( 13, -7, Tightening::LB ), Tightening( 13, 15, Tightening::UB ), - - Tightening( 14, 0, Tightening::LB ), Tightening( 14, 18, Tightening::UB ), - Tightening( 15, 0, Tightening::LB ), Tightening( 15, 25, Tightening::UB ), - Tightening( 16, 0, Tightening::LB ), Tightening( 16, 15, Tightening::UB ), - - Tightening( 17, -33, Tightening::LB ), Tightening( 17, 25, Tightening::UB ), - Tightening( 18, 0, Tightening::LB ), Tightening( 18, 58, Tightening::UB ), - - Tightening( 19, 0, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), - Tightening( 20, 0, Tightening::LB ), Tightening( 20, 58, Tightening::UB ), - - Tightening( 21, -59, Tightening::LB ), Tightening( 21, 32, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_invprop_round() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardRound( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), - Tightening( 7, -4, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 6.5, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( 
nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 11, -4, Tightening::LB ), - Tightening( 11, 6, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, -3, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), - Tightening( 7, -10.5, Tightening::LB ), Tightening( 7, 5.5, Tightening::UB ), - - Tightening( 8, -3, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, -10, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), - - Tightening( 10, -3, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - Tightening( 11, -15.5, Tightening::LB ), Tightening( 11, 11.5, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 7, -10, Tightening::LB ), - Tightening( 7, 5, Tightening::UB ), - - Tightening( 11, -14, Tightening::LB ), - Tightening( 11, 11, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_invprop_round2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardRound2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( 
nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -11, Tightening::LB ), Tightening( 11, 15, Tightening::UB ), - Tightening( 12, -10, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), - Tightening( 13, -7, Tightening::LB ), Tightening( 13, 3, Tightening::UB ), - - Tightening( 14, -11, Tightening::LB ), Tightening( 14, 15, Tightening::UB ), - Tightening( 15, -10, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), - Tightening( 16, -7, Tightening::LB ), Tightening( 16, 3, Tightening::UB ), - - Tightening( 17, -27.5, Tightening::LB ), Tightening( 17, 27.5, Tightening::UB ), - Tightening( 18, -16.5, Tightening::LB ), Tightening( 18, 16.5, Tightening::UB ), - - Tightening( 19, -28, Tightening::LB ), Tightening( 19, 28, Tightening::UB ), - Tightening( 20, -17, Tightening::LB ), Tightening( 20, 17, Tightening::UB ), - - Tightening( 21, -38, Tightening::LB ), Tightening( 21, 36, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - 
tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -13, Tightening::LB ), Tightening( 11, 30, Tightening::UB ), - Tightening( 12, -23, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), - Tightening( 13, -9, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), - - Tightening( 14, -13, Tightening::LB ), Tightening( 14, 30, Tightening::UB ), - Tightening( 15, -23, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), - Tightening( 16, -9, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), - - Tightening( 17, -57.5, Tightening::LB ), Tightening( 17, 32.5, Tightening::UB ), - Tightening( 18, -23.5, Tightening::LB ), Tightening( 18, 26.5, Tightening::UB ), - - Tightening( 19, -58, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), - Tightening( 20, -24, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), - - Tightening( 21, -74, Tightening::LB ), Tightening( 21, 44, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_invprop_sign() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSign( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB 
), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_invprop_sign2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSign2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - 
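// Every Sign output in this network lies in { -1, 1 }; accordingly, both
// invprop passes in this test are expected to produce no additional
// tightenings ( expectedBounds2 and expectedBounds4 below are empty ).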
tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), - Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), - Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), - - Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), - Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), - Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), - - Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), - Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), - - Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), - Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), - - Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, 
-large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), - Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), - Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), - - Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), - Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), - Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), - - Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), - Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), - - Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), - Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), - - Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_invprop_leaky_relu() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardLeakyReLU( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, 
Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 4, -0.1, Tightening::LB ), - - Tightening( 8, -0.045, Tightening::LB ), - Tightening( 9, -0.3, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), - Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), - - Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), - Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), - - Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), - Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 4, -0.5, Tightening::LB ), - Tightening( 5, -0.4, Tightening::LB ), - - Tightening( 8, -0.4571, Tightening::LB ), - Tightening( 9, -1.1057, 
Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_invprop_leaky_relu2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardLeakyRelu2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ), - Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ), - Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ), - - Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ), - Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ), - Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ), - - Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ), - Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ), - - Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ), - Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ), - - Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 7, -0.2, Tightening::LB ), - Tightening( 8, -0.3, Tightening::LB ), - Tightening( 9, -0.3, Tightening::LB ), - Tightening( 10, -0.6, Tightening::LB ), - - Tightening( 14, -0.9, Tightening::LB ), - Tightening( 15, -0.89, Tightening::LB ), - Tightening( 16, -0.77, Tightening::LB ), - - Tightening( 19, -2.3133, Tightening::LB ), - Tightening( 20, -1.2, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - 
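// With the input box widened, reset every non-input variable to
// [ -large, large ] so that the next DeepPoly pass recomputes its bounds
// from the new inputs alone.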
double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ), - Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ), - Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ), - - Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ), - Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ), - Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ), - - Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ), - Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ), - - Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ), - Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ), - - Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - 
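// A sanity check on the expected invprop tightenings below, assuming the
// 0.1 slope used by populateNetworkBackwardLeakyRelu2 ( inferred from the
// expected values ): a leaky ReLU y = max( x, 0.1 * x ) always satisfies
// y >= 0.1 * lb( x ), so each activation inherits a lower bound of 0.1
// times its source's lower bound, e.g. x7 >= 0.1 * ( -2 ) = -0.2 and
// x19 >= 0.1 * ( -56.2592 ) = -5.6259 ( rounded ).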
List expectedBounds4( { - Tightening( 7, -0.2, Tightening::LB ), - Tightening( 8, -0.5, Tightening::LB ), - Tightening( 9, -0.6, Tightening::LB ), - Tightening( 10, -1.5, Tightening::LB ), - - Tightening( 14, -1.1, Tightening::LB ), - Tightening( 15, -2.1771, Tightening::LB ), - Tightening( 16, -1.15, Tightening::LB ), - - Tightening( 19, -5.6259, Tightening::LB ), - Tightening( 20, -1.9, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_invprop_softmax_and_max() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSoftmaxAndMax( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - - Tightening( 3, 0.2119, Tightening::LB ), Tightening( 3, 0.8756, Tightening::UB ), - Tightening( 5, 0.0049, Tightening::LB ), Tightening( 5, 0.6652, Tightening::UB ), - Tightening( 7, 0.0634, Tightening::LB ), Tightening( 7, 0.4955, Tightening::UB ), - - Tightening( 8, -0.1870, Tightening::LB ), Tightening( 8, 1.4488, Tightening::UB ), - Tightening( 9, 0.6814, Tightening::LB ), Tightening( 9, 2.2432, Tightening::UB ), - - Tightening( 10, 0.6814, Tightening::LB ), Tightening( 10, 2.2432, Tightening::UB ), - - Tightening( 11, -2.2432, Tightening::LB ), Tightening( 11, -0.6814, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, 
large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), - - Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), - Tightening( 9, 0.2194, Tightening::LB ), Tightening( 9, 2.9934, Tightening::UB ), - - Tightening( 10, 0.2194, Tightening::LB ), Tightening( 10, 2.9934, Tightening::UB ), - - Tightening( 11, -2.9934, Tightening::LB ), Tightening( 11, -0.2194, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_invprop_softmax_and_max2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSoftmaxAndMax2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0.0003, Tightening::LB ), Tightening( 7, 0.9864, Tightening::UB ), - Tightening( 8, 0.0001, Tightening::LB ), Tightening( 8, 0.9907, Tightening::UB ), - Tightening( 9, 0.0001, Tightening::LB ), Tightening( 9, 0.9907, Tightening::UB ), - Tightening( 10, 0.0001, Tightening::LB ), Tightening( 10, 0.9994, Tightening::UB ), - - Tightening( 11, 1.0013, Tightening::LB ), Tightening( 11, 4.9634, Tightening::UB ), - Tightening( 12, -0.9861, Tightening::LB ), Tightening( 12, 2.9152, Tightening::UB ), - Tightening( 13, -2.9868, Tightening::LB ), Tightening( 13, 0.9678, Tightening::UB ), - - Tightening( 14, 0.1143, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ), - Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8694, Tightening::UB ), - Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4596, Tightening::UB ), - - Tightening( 17, 0.1143, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ), - 
} ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, 0.0001, Tightening::LB ), Tightening( 7, 0.9999, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 0.9991, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 0.9990, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9999, Tightening::UB ), - - Tightening( 11, 1.0003, Tightening::LB ), Tightening( 11, 4.9326, Tightening::UB ), - Tightening( 12, -0.9999, Tightening::LB ), Tightening( 12, 2.9979, Tightening::UB ), - Tightening( 13, -2.9990, Tightening::LB ), Tightening( 13, 0.9980, Tightening::UB ), - - Tightening( 14, 0.1067, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ), - Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8786, Tightening::UB ), - Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4677, Tightening::UB ), - - Tightening( 17, 0.1067, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - 
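// updateTableau writes the tightenings collected above back into the mock
// tableau; obtainCurrentBounds then reloads them into the reasoner so that
// the LP-based backward pass starts from the tightened box.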
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_invprop_relu_and_bilinear()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardReluAndBilinear( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
-
-            Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ),
-
-            Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ),
-
-            Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke Invprop
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-
-            Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ),
-            Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ),
-
-            Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ),
-
-            Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke Invprop
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 7, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_invprop_relu_and_bilinear2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardReluAndBilinear2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-
-            Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ),
-            Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ),
-
-            Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ),
-            Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ),
-
-            Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke Invprop
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 13, 0, Tightening::LB ),
-            Tightening( 14, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-
-            Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ),
-            Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ),
-
-            Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ),
-            Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ),
-
-            Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke Invprop
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 7, 0, Tightening::LB ),
-
-            Tightening( 13, 0, Tightening::LB ),
-            Tightening( 14, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_random_relu()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-random" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardReLU( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
-
-            Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-
-            Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ),
-            Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 8, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-
-            Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ),
-            Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ),
-
-            Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ),
-
-            Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ),
-            Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 8, 0, Tightening::LB ),
-            Tightening( 9, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_random_relu2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-random" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardReLU2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-
-            Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ),
-            Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ),
-            Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ),
-
-            Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ),
-            Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ),
-            Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ),
-
-            Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ),
-            Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ),
-
-            Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ),
-            Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ),
-
-            Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 14, 0, Tightening::LB ),
-            Tightening( 15, 0, Tightening::LB ),
-            Tightening( 16, 0, Tightening::LB ),
-
-            Tightening( 19, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-
-            Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ),
-            Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ),
-            Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ),
-
-            Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ),
-            Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ),
-            Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ),
-
-            Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ),
-            Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ),
-
-            Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ),
-            Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ),
-
-            Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 7, 0, Tightening::LB ),
-
-            Tightening( 14, 0, Tightening::LB ),
-            Tightening( 15, 0, Tightening::LB ),
-            Tightening( 16, 0, Tightening::LB ),
-
-            Tightening( 20, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_random_sigmoid()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-random" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSigmoid( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, 0.2689, Tightening::LB ), Tightening( 4, 0.7311, Tightening::UB ),
-            Tightening( 5, 0.5, Tightening::LB ), Tightening( 5, 0.8808, Tightening::UB ),
-
-            Tightening( 6, -0.1261, Tightening::LB ), Tightening( 6, 0.5069, Tightening::UB ),
-            Tightening( 7, -0.2379, Tightening::LB ), Tightening( 7, 0.8571, Tightening::UB ),
-
-            Tightening( 8, 0.4685, Tightening::LB ), Tightening( 8, 0.6241, Tightening::UB ),
-            Tightening( 9, 0.4408, Tightening::LB ), Tightening( 9, 0.7021, Tightening::UB ),
-
-            Tightening( 10, -1.1819, Tightening::LB ), Tightening( 10, -1.0535, Tightening::UB ),
-            Tightening( 11, 3.4986, Tightening::LB ), Tightening( 11, 3.8797, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, 0.0066, Tightening::LB ), Tightening( 4, 0.8807, Tightening::UB ),
-            Tightening( 5, 0.0179, Tightening::LB ), Tightening( 5, 0.9526, Tightening::UB ),
-
-            Tightening( 6, -0.8362, Tightening::LB ), Tightening( 6, 0.9193, Tightening::UB ),
-            Tightening( 7, -0.8860, Tightening::LB ), Tightening( 7, 1.6904, Tightening::UB ),
-
-            Tightening( 8, 0.3023, Tightening::LB ), Tightening( 8, 0.7148, Tightening::UB ),
-            Tightening( 9, 0.2919, Tightening::LB ), Tightening( 9, 0.8443, Tightening::UB ),
-
-            Tightening( 10, -1.2694, Tightening::LB ), Tightening( 10, -0.8841, Tightening::UB ),
-            Tightening( 11, 3.2396, Tightening::LB ), Tightening( 11, 4.05, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_random_sigmoid2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-random" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSigmoid2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ),
-            Tightening( 8, 0.0474, Tightening::LB ), Tightening( 8, 0.9526, Tightening::UB ),
-            Tightening( 9, 0.0474, Tightening::LB ), Tightening( 9, 0.9526, Tightening::UB ),
-            Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9975, Tightening::UB ),
-
-            Tightening( 11, 1.3787, Tightening::LB ), Tightening( 11, 4.6213, Tightening::UB ),
-            Tightening( 12, -0.5636, Tightening::LB ), Tightening( 12, 2.5636, Tightening::UB ),
-            Tightening( 13, -2.3771, Tightening::LB ), Tightening( 13, 0.3771, Tightening::UB ),
-
-            Tightening( 14, 0.7988, Tightening::LB ), Tightening( 14, 0.9903, Tightening::UB ),
-            Tightening( 15, 0.3627, Tightening::LB ), Tightening( 15, 0.9285, Tightening::UB ),
-            Tightening( 16, 0.0849, Tightening::LB ), Tightening( 16, 0.5932, Tightening::UB ),
-
-            Tightening( 17, -1.2113, Tightening::LB ), Tightening( 17, 0.0354, Tightening::UB ),
-            Tightening( 18, 1.4027, Tightening::LB ), Tightening( 18, 2.4177, Tightening::UB ),
-
-            Tightening( 19, 0.2295, Tightening::LB ), Tightening( 19, 0.5088, Tightening::UB ),
-            Tightening( 20, 0.8026, Tightening::LB ), Tightening( 20, 0.9182, Tightening::UB ),
-
-            Tightening( 21, -1.6539, Tightening::LB ), Tightening( 21, -1.3393, Tightening::UB ),
-        } );
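The boundsEqual predicate asserted throughout is likewise assumed to be defined elsewhere in this suite; the assertions only make sense if it checks that the two lists contain the same tightenings regardless of order, and the four-decimal expectations above suggest the value comparison is done within a FloatUtils tolerance rather than exactly. A sketch under those assumptions (illustrative only, ignoring duplicate entries):

    // Hypothetical sketch: order-insensitive comparison of two tightening lists,
    // matching values up to a small tolerance.
    bool boundsEqual( const List<Tightening> &bounds, const List<Tightening> &expectedBounds )
    {
        if ( bounds.size() != expectedBounds.size() )
            return false;

        for ( const auto &bound : bounds )
        {
            bool found = false;
            for ( const auto &expected : expectedBounds )
            {
                if ( bound._variable == expected._variable && bound._type == expected._type &&
                     FloatUtils::areEqual( bound._value, expected._value, 0.0001 ) )
                    found = true;
            }
            if ( !found )
                return false;
        }
        return true;
    }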
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.9933, Tightening::UB ),
-            Tightening( 8, 0.0067, Tightening::LB ), Tightening( 8, 0.9933, Tightening::UB ),
-            Tightening( 9, 0.0025, Tightening::LB ), Tightening( 9, 0.9933, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9991, Tightening::UB ),
-
-            Tightening( 11, 1.2517, Tightening::LB ), Tightening( 11, 4.9701, Tightening::UB ),
-            Tightening( 12, -0.9599, Tightening::LB ), Tightening( 12, 2.8466, Tightening::UB ),
-            Tightening( 13, -2.8147, Tightening::LB ), Tightening( 13, 0.9188, Tightening::UB ),
-
-            Tightening( 14, 0.7776, Tightening::LB ), Tightening( 14, 0.9931, Tightening::UB ),
-            Tightening( 15, 0.2769, Tightening::LB ), Tightening( 15, 0.9451, Tightening::UB ),
-            Tightening( 16, 0.0565, Tightening::LB ), Tightening( 16, 0.7148, Tightening::UB ),
-
-            Tightening( 17, -1.4307, Tightening::LB ), Tightening( 17, 0.1083, Tightening::UB ),
-            Tightening( 18, 1.2592, Tightening::LB ), Tightening( 18, 2.5519, Tightening::UB ),
-
-            Tightening( 19, 0.1929, Tightening::LB ), Tightening( 19, 0.5270, Tightening::UB ),
-            Tightening( 20, 0.7789, Tightening::LB ), Tightening( 20, 0.9277, Tightening::UB ),
-
-            Tightening( 21, -1.6967, Tightening::LB ), Tightening( 21, -1.3115, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_random_abs()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-random" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardAbs( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
-
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ),
-
-            Tightening( 10, -4, Tightening::LB ), Tightening( 10, 0, Tightening::UB ),
-            Tightening( 11, 2, Tightening::LB ), Tightening( 11, 8, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, 0, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 4, Tightening::UB ),
-
-            Tightening( 6, -5, Tightening::LB ), Tightening( 6, 4, Tightening::UB ),
-            Tightening( 7, -4, Tightening::LB ), Tightening( 7, 10, Tightening::UB ),
-
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 10, Tightening::UB ),
-
-            Tightening( 10, -15, Tightening::LB ), Tightening( 10, 0, Tightening::UB ),
-            Tightening( 11, 2, Tightening::LB ), Tightening( 11, 27, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_random_abs2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-random" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardAbs2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-
-            Tightening( 11, -4, Tightening::LB ), Tightening( 11, 9, Tightening::UB ),
-            Tightening( 12, -2, Tightening::LB ), Tightening( 12, 12, Tightening::UB ),
-            Tightening( 13, -5, Tightening::LB ), Tightening( 13, 6, Tightening::UB ),
-
-            Tightening( 14, 0, Tightening::LB ), Tightening( 14, 9, Tightening::UB ),
-            Tightening( 15, 0, Tightening::LB ), Tightening( 15, 12, Tightening::UB ),
-            Tightening( 16, 0, Tightening::LB ), Tightening( 16, 6, Tightening::UB ),
-
-            Tightening( 17, -15, Tightening::LB ), Tightening( 17, 12, Tightening::UB ),
-            Tightening( 18, 0, Tightening::LB ), Tightening( 18, 27, Tightening::UB ),
-
-            Tightening( 19, 0, Tightening::LB ), Tightening( 19, 15, Tightening::UB ),
-            Tightening( 20, 0, Tightening::LB ), Tightening( 20, 27, Tightening::UB ),
-
-            Tightening( 21, -28, Tightening::LB ), Tightening( 21, 14, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 6, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 15, Tightening::UB ),
-
-            Tightening( 11, -13, Tightening::LB ), Tightening( 11, 18, Tightening::UB ),
-            Tightening( 12, -5, Tightening::LB ), Tightening( 12, 25, Tightening::UB ),
-            Tightening( 13, -7, Tightening::LB ), Tightening( 13, 15, Tightening::UB ),
-
-            Tightening( 14, 0, Tightening::LB ), Tightening( 14, 18, Tightening::UB ),
-            Tightening( 15, 0, Tightening::LB ), Tightening( 15, 25, Tightening::UB ),
-            Tightening( 16, 0, Tightening::LB ), Tightening( 16, 15, Tightening::UB ),
-
-            Tightening( 17, -33, Tightening::LB ), Tightening( 17, 25, Tightening::UB ),
-            Tightening( 18, 0, Tightening::LB ), Tightening( 18, 58, Tightening::UB ),
-
-            Tightening( 19, 0, Tightening::LB ), Tightening( 19, 33, Tightening::UB ),
-            Tightening( 20, 0, Tightening::LB ), Tightening( 20, 58, Tightening::UB ),
-
-            Tightening( 21, -59, Tightening::LB ), Tightening( 21, 32, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_random_round()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-random" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardRound( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
-
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ),
-            Tightening( 7, -4, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-
-            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, -4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ),
-
-            Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ),
-            Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 6.5, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 11, -4, Tightening::LB ),
-            Tightening( 11, 6, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
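The long runs of setLowerBound/setUpperBound calls that reset every variable to [-large, large], like the one that follows, are purely mechanical; under the same mock-tableau API they could be collapsed into a small loop. A sketch with a hypothetical helper name, shown only to make the idiom explicit:

    // Hypothetical sketch: relax variables first..last to [-large, large].
    void relaxBounds( MockTableau &tableau, unsigned first, unsigned last, double large )
    {
        for ( unsigned variable = first; variable <= last; ++variable )
        {
            tableau.setLowerBound( variable, -large );
            tableau.setUpperBound( variable, large );
        }
    }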
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-
-            Tightening( 6, -3, Tightening::LB ), Tightening( 6, 5, Tightening::UB ),
-            Tightening( 7, -10.5, Tightening::LB ), Tightening( 7, 5.5, Tightening::UB ),
-
-            Tightening( 8, -3, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, -10, Tightening::LB ), Tightening( 9, 6, Tightening::UB ),
-
-            Tightening( 10, -3, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-            Tightening( 11, -15.5, Tightening::LB ), Tightening( 11, 11.5, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 7, -10, Tightening::LB ),
-            Tightening( 7, 5, Tightening::UB ),
-
-            Tightening( 11, -14, Tightening::LB ),
-            Tightening( 11, 11, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_random_round2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-random" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardRound2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-            Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
-            Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-
-            Tightening( 11, -11, Tightening::LB ), Tightening( 11, 15, Tightening::UB ),
-            Tightening( 12, -10, Tightening::LB ), Tightening( 12, 10, Tightening::UB ),
-            Tightening( 13, -7, Tightening::LB ), Tightening( 13, 3, Tightening::UB ),
-
-            Tightening( 14, -11, Tightening::LB ), Tightening( 14, 15, Tightening::UB ),
-            Tightening( 15, -10, Tightening::LB ), Tightening( 15, 10, Tightening::UB ),
-            Tightening( 16, -7, Tightening::LB ), Tightening( 16, 3, Tightening::UB ),
-
-            Tightening( 17, -27.5, Tightening::LB ), Tightening( 17, 27.5, Tightening::UB ),
-            Tightening( 18, -16.5, Tightening::LB ), Tightening( 18, 16.5, Tightening::UB ),
-
-            Tightening( 19, -28, Tightening::LB ), Tightening( 19, 28, Tightening::UB ),
-            Tightening( 20, -17, Tightening::LB ), Tightening( 20, 17, Tightening::UB ),
-
-            Tightening( 21, -38, Tightening::LB ), Tightening( 21, 36, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
-            Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-
-            Tightening( 11, -13, Tightening::LB ), Tightening( 11, 30, Tightening::UB ),
-            Tightening( 12, -23, Tightening::LB ), Tightening( 12, 12, Tightening::UB ),
-            Tightening( 13, -9, Tightening::LB ), Tightening( 13, 6, Tightening::UB ),
-
-            Tightening( 14, -13, Tightening::LB ), Tightening( 14, 30, Tightening::UB ),
-            Tightening( 15, -23, Tightening::LB ), Tightening( 15, 12, Tightening::UB ),
-            Tightening( 16, -9, Tightening::LB ), Tightening( 16, 6, Tightening::UB ),
-
-            Tightening( 17, -57.5, Tightening::LB ), Tightening( 17, 32.5, Tightening::UB ),
-            Tightening( 18, -23.5, Tightening::LB ), Tightening( 18, 26.5, Tightening::UB ),
-
-            Tightening( 19, -58, Tightening::LB ), Tightening( 19, 33, Tightening::UB ),
-            Tightening( 20, -24, Tightening::LB ), Tightening( 20, 27, Tightening::UB ),
-
-            Tightening( 21, -74, Tightening::LB ), Tightening( 21, 44, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_random_sign()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-random" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSign( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ),
-
-            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-
-            Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ),
-            Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ),
-
-            Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ),
-
-            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-
-            Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ),
-            Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_random_sign2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-random" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSign2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-            Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ),
-
-            Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ),
-            Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ),
-            Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ),
-
-            Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ),
-            Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ),
-            Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ),
-
-            Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ),
-            Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ),
-
-            Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ),
-            Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ),
-
-            Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-            Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ),
-
-            Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ),
-            Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ),
-            Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ),
-
-            Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ),
-            Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ),
-            Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ),
-
-            Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ),
-            Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ),
-
-            Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ),
-            Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ),
-
-            Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_random_leaky_relu()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-random" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardLeakyReLU( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
-
-            Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ),
-            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-
-            Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ),
-            Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 4, -0.1, Tightening::LB ),
-
-            Tightening( 8, -0.045, Tightening::LB ),
-            Tightening( 9, -0.3, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-
-            Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ),
-            Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ),
-
-            Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ),
-            Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ),
-
-            Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ),
-            Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 4, -0.5, Tightening::LB ),
-            Tightening( 5, -0.4, Tightening::LB ),
-
-            Tightening( 8, -0.4571, Tightening::LB ),
-            Tightening( 9, -1.1057, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_random_leaky_relu2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-random" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardLeakyRelu2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-            Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
-            Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-
-            Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ),
-            Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ),
-            Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ),
-
-            Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ),
-            Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ),
-            Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ),
-
-            Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ),
-            Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ),
-
-            Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ),
-            Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ),
-
-            Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 7, -0.2, Tightening::LB ),
-            Tightening( 8, -0.3, Tightening::LB ),
-            Tightening( 9, -0.3, Tightening::LB ),
-            Tightening( 10, -0.6, Tightening::LB ),
-
-            Tightening( 14, -0.9, Tightening::LB ),
-            Tightening( 15, -0.89, Tightening::LB ),
-            Tightening( 16, -0.77, Tightening::LB ),
-
-            Tightening( 19, -2.3133, Tightening::LB ),
-            Tightening( 20, -1.2, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
-            Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-
-            Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ),
-            Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ),
-            Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ),
-
-            Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ),
-            Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ),
-            Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ),
-
-            Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ),
-            Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ),
-
-            Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ),
-            Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ),
-
-            Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 7, -0.2, Tightening::LB ),
-            Tightening( 8, -0.5, Tightening::LB ),
-            Tightening( 9, -0.6, Tightening::LB ),
-            Tightening( 10, -1.5, Tightening::LB ),
-
-            Tightening( 14, -1.1, Tightening::LB ),
-            Tightening( 15, -2.1771, Tightening::LB ),
-            Tightening( 16, -1.15, Tightening::LB ),
-
-            Tightening( 19, -5.6259, Tightening::LB ),
-            Tightening( 20, -1.9, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_random_softmax_and_max()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-random" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSoftmaxAndMax( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke SBT
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
-
-            Tightening( 3, 0.2119, Tightening::LB ), Tightening( 3, 0.8756, Tightening::UB ),
-            Tightening( 5, 0.0049, Tightening::LB ), Tightening( 5, 0.6652, Tightening::UB ),
-            Tightening( 7, 0.0634, Tightening::LB ), Tightening( 7, 0.4955, Tightening::UB ),
-
-            Tightening( 8, -0.1870, Tightening::LB ), Tightening( 8, 1.4488, Tightening::UB ),
-            Tightening( 9, 0.6814, Tightening::LB ), Tightening( 9, 2.2432, Tightening::UB ),
-
-            Tightening( 10, 0.6814, Tightening::LB ), Tightening( 10, 2.2432, Tightening::UB ),
-
-            Tightening( 11, -2.2432, Tightening::LB ), Tightening( 11, -0.6814, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), - - Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), - Tightening( 9, 0.2194, Tightening::LB ), Tightening( 9, 2.9934, Tightening::UB ), - - Tightening( 10, 0.2194, Tightening::LB ), Tightening( 10, 2.9934, Tightening::UB ), - - Tightening( 11, -2.9934, Tightening::LB ), Tightening( 11, -0.2194, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PMNR with random neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_pmnr_random_softmax_and_max2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-pmnr-random" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSoftmaxAndMax2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0.0003, Tightening::LB ), Tightening( 7, 0.9864, Tightening::UB ), - Tightening( 8, 0.0001, Tightening::LB ), Tightening( 8, 0.9907, Tightening::UB ), - Tightening( 9, 0.0001, Tightening::LB ), Tightening( 9, 0.9907, Tightening::UB ), - Tightening( 10, 0.0001, Tightening::LB ), Tightening( 10, 0.9994, Tightening::UB ), - - Tightening( 11, 1.0013, Tightening::LB ), Tightening( 11, 4.9634, Tightening::UB ), - Tightening( 12, -0.9861, Tightening::LB ), Tightening( 12, 2.9152, Tightening::UB ), - Tightening( 13, -2.9868, Tightening::LB ), Tightening( 13, 0.9678, Tightening::UB ), - - Tightening( 14, 0.1143, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ), - Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8694, Tightening::UB ), - Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4596, Tightening::UB ), - - Tightening( 17, 0.1143, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( 
nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PMNR with random neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, 0.0001, Tightening::LB ), Tightening( 7, 0.9999, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 0.9991, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 0.9990, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9999, Tightening::UB ), - - Tightening( 11, 1.0003, Tightening::LB ), Tightening( 11, 4.9326, Tightening::UB ), - Tightening( 12, -0.9999, Tightening::LB ), Tightening( 12, 2.9979, Tightening::UB ), - Tightening( 13, -2.9990, Tightening::LB ), Tightening( 13, 0.9980, Tightening::UB ), - - Tightening( 14, 0.1067, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ), - Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8786, Tightening::UB ), - Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4677, Tightening::UB ), - - Tightening( 17, 0.1067, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PMNR with random neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - 
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_pmnr_random_relu_and_bilinear() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-pmnr-random" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardReluAndBilinear( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - - Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ), - - Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ), - - Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PMNR with random neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - 
Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), - Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), - - Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), - - Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PMNR with random neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 7, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_pmnr_random_relu_and_bilinear2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-pmnr-random" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardReluAndBilinear2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), - - Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ), - Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ), - - Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PMNR with random neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 13, 0, Tightening::LB ), - Tightening( 14, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - 
tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), - Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), - - Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ), - Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ), - - Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PMNR with random neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 7, 0, Tightening::LB ), - - Tightening( 13, 0, Tightening::LB ), - Tightening( 14, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - - void test_pmnr_gradient_relu() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-pmnr-gradient" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardReLU( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - // Invoke DeepPoly - 
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ), - Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PMNR with gradient heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 8, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ), - - Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PMNR with gradient 
heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 8, 0, Tightening::LB ), - Tightening( 9, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_pmnr_gradient_relu2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-pmnr-gradient" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardReLU2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), - Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ), - - Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ), - Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), - Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ), - - Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ), - Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ), - - Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ), - Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ), - - Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PMNR with gradient heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 14, 0, Tightening::LB ), - Tightening( 15, 0, Tightening::LB ), - Tightening( 16, 0, Tightening::LB ), - - Tightening( 19, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - 
tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), - Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), - Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ), - - Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ), - Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ), - Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ), - - Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ), - Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ), - - Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ), - Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ), - - Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PMNR with gradient heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( 
nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 7, 0, Tightening::LB ), - - Tightening( 14, 0, Tightening::LB ), - Tightening( 15, 0, Tightening::LB ), - Tightening( 16, 0, Tightening::LB ), - - Tightening( 20, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_pmnr_gradient_sigmoid() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-pmnr-gradient" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSigmoid( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, 0.2689, Tightening::LB ), Tightening( 4, 0.7311, Tightening::UB ), - Tightening( 5, 0.5, Tightening::LB ), Tightening( 5, 0.8808, Tightening::UB ), - - Tightening( 6, -0.1261, Tightening::LB ), Tightening( 6, 0.5069, Tightening::UB ), - Tightening( 7, -0.2379, Tightening::LB ), Tightening( 7, 0.8571, Tightening::UB ), - - Tightening( 8, 0.4685, Tightening::LB ), Tightening( 8, 0.6241, Tightening::UB ), - Tightening( 9, 0.4408, Tightening::LB ), Tightening( 9, 0.7021, Tightening::UB ), - - Tightening( 10, -1.1819, Tightening::LB ), Tightening( 10, -1.0535, Tightening::UB ), - Tightening( 11, 3.4986, Tightening::LB ), Tightening( 11, 3.8797, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PMNR with gradient heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - 
TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, 0.0066, Tightening::LB ), Tightening( 4, 0.8807, Tightening::UB ), - Tightening( 5, 0.0179, Tightening::LB ), Tightening( 5, 0.9526, Tightening::UB ), - - Tightening( 6, -0.8362, Tightening::LB ), Tightening( 6, 0.9193, Tightening::UB ), - Tightening( 7, -0.8860, Tightening::LB ), Tightening( 7, 1.6904, Tightening::UB ), - - Tightening( 8, 0.3023, Tightening::LB ), Tightening( 8, 0.7148, Tightening::UB ), - Tightening( 9, 0.2919, Tightening::LB ), Tightening( 9, 0.8443, Tightening::UB ), - - Tightening( 10, -1.2694, Tightening::LB ), Tightening( 10, -0.8841, Tightening::UB ), - Tightening( 11, 3.2396, Tightening::LB ), Tightening( 11, 4.05, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PMNR with gradient heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_pmnr_gradient_sigmoid2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-pmnr-gradient" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSigmoid2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), - Tightening( 8, 0.0474, Tightening::LB ), Tightening( 8, 0.9526, Tightening::UB ), - Tightening( 9, 0.0474, Tightening::LB ), Tightening( 9, 0.9526, Tightening::UB ), - Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9975, Tightening::UB ), - - Tightening( 11, 1.3787, Tightening::LB ), Tightening( 11, 4.6213, Tightening::UB ), - Tightening( 12, -0.5636, Tightening::LB ), Tightening( 12, 2.5636, Tightening::UB ), - Tightening( 13, -2.3771, Tightening::LB ), Tightening( 13, 0.3771, Tightening::UB ), - - Tightening( 14, 0.7988, Tightening::LB ), Tightening( 14, 0.9903, Tightening::UB ), - Tightening( 15, 0.3627, Tightening::LB ), Tightening( 15, 0.9285, Tightening::UB ), - Tightening( 16, 0.0849, Tightening::LB ), Tightening( 16, 0.5932, Tightening::UB ), - - Tightening( 17, -1.2113, Tightening::LB ), Tightening( 17, 0.0354, Tightening::UB ), - Tightening( 18, 1.4027, Tightening::LB ), Tightening( 18, 2.4177, Tightening::UB ), - - Tightening( 19, 
0.2295, Tightening::LB ), Tightening( 19, 0.5088, Tightening::UB ), - Tightening( 20, 0.8026, Tightening::LB ), Tightening( 20, 0.9182, Tightening::UB ), - - Tightening( 21, -1.6539, Tightening::LB ), Tightening( 21, -1.3393, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PMNR with gradient heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.9933, Tightening::UB ), - Tightening( 8, 0.0067, Tightening::LB ), Tightening( 8, 0.9933, Tightening::UB ), - Tightening( 9, 0.0025, Tightening::LB ), Tightening( 9, 0.9933, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9991, Tightening::UB ), - - Tightening( 11, 1.2517, Tightening::LB ), Tightening( 11, 4.9701, Tightening::UB ), - Tightening( 12, -0.9599, Tightening::LB ), Tightening( 12, 2.8466, Tightening::UB ), - Tightening( 13, -2.8147, Tightening::LB ), Tightening( 13, 
-
-            Tightening( 14, 0.7776, Tightening::LB ), Tightening( 14, 0.9931, Tightening::UB ),
-            Tightening( 15, 0.2769, Tightening::LB ), Tightening( 15, 0.9451, Tightening::UB ),
-            Tightening( 16, 0.0565, Tightening::LB ), Tightening( 16, 0.7148, Tightening::UB ),
-
-            Tightening( 17, -1.4307, Tightening::LB ), Tightening( 17, 0.1083, Tightening::UB ),
-            Tightening( 18, 1.2592, Tightening::LB ), Tightening( 18, 2.5519, Tightening::UB ),
-
-            Tightening( 19, 0.1929, Tightening::LB ), Tightening( 19, 0.5270, Tightening::UB ),
-            Tightening( 20, 0.7789, Tightening::LB ), Tightening( 20, 0.9277, Tightening::UB ),
-
-            Tightening( 21, -1.6967, Tightening::LB ), Tightening( 21, -1.3115, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_gradient_abs()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-gradient" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardAbs( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
-
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ),
-
-            Tightening( 10, -4, Tightening::LB ), Tightening( 10, 0, Tightening::UB ),
-            Tightening( 11, 2, Tightening::LB ), Tightening( 11, 8, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, 0, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 4, Tightening::UB ),
-
-            Tightening( 6, -5, Tightening::LB ), Tightening( 6, 4, Tightening::UB ),
-            Tightening( 7, -4, Tightening::LB ), Tightening( 7, 10, Tightening::UB ),
-
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 10, Tightening::UB ),
-
-            Tightening( 10, -15, Tightening::LB ), Tightening( 10, 0, Tightening::UB ),
-            Tightening( 11, 2, Tightening::LB ), Tightening( 11, 27, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_gradient_abs2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-gradient" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardAbs2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-
-            Tightening( 11, -4, Tightening::LB ), Tightening( 11, 9, Tightening::UB ),
-            Tightening( 12, -2, Tightening::LB ), Tightening( 12, 12, Tightening::UB ),
-            Tightening( 13, -5, Tightening::LB ), Tightening( 13, 6, Tightening::UB ),
-
-            Tightening( 14, 0, Tightening::LB ), Tightening( 14, 9, Tightening::UB ),
-            Tightening( 15, 0, Tightening::LB ), Tightening( 15, 12, Tightening::UB ),
-            Tightening( 16, 0, Tightening::LB ), Tightening( 16, 6, Tightening::UB ),
-
-            Tightening( 17, -15, Tightening::LB ), Tightening( 17, 12, Tightening::UB ),
-            Tightening( 18, 0, Tightening::LB ), Tightening( 18, 27, Tightening::UB ),
-
-            Tightening( 19, 0, Tightening::LB ), Tightening( 19, 15, Tightening::UB ),
-            Tightening( 20, 0, Tightening::LB ), Tightening( 20, 27, Tightening::UB ),
-
-            Tightening( 21, -28, Tightening::LB ), Tightening( 21, 14, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 6, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 15, Tightening::UB ),
-
-            Tightening( 11, -13, Tightening::LB ), Tightening( 11, 18, Tightening::UB ),
-            Tightening( 12, -5, Tightening::LB ), Tightening( 12, 25, Tightening::UB ),
-            Tightening( 13, -7, Tightening::LB ), Tightening( 13, 15, Tightening::UB ),
-
-            Tightening( 14, 0, Tightening::LB ), Tightening( 14, 18, Tightening::UB ),
-            Tightening( 15, 0, Tightening::LB ), Tightening( 15, 25, Tightening::UB ),
-            Tightening( 16, 0, Tightening::LB ), Tightening( 16, 15, Tightening::UB ),
-
-            Tightening( 17, -33, Tightening::LB ), Tightening( 17, 25, Tightening::UB ),
-            Tightening( 18, 0, Tightening::LB ), Tightening( 18, 58, Tightening::UB ),
-
-            Tightening( 19, 0, Tightening::LB ), Tightening( 19, 33, Tightening::UB ),
-            Tightening( 20, 0, Tightening::LB ), Tightening( 20, 58, Tightening::UB ),
-
-            Tightening( 21, -59, Tightening::LB ), Tightening( 21, 32, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_gradient_round()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-gradient" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardRound( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
-
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ),
-            Tightening( 7, -4, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-
-            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, -4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ),
-
-            Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ),
-            Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 6.5, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 11, -4, Tightening::LB ),
-            Tightening( 11, 6, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-
-            Tightening( 6, -3, Tightening::LB ), Tightening( 6, 5, Tightening::UB ),
-            Tightening( 7, -10.5, Tightening::LB ), Tightening( 7, 5.5, Tightening::UB ),
-
-            Tightening( 8, -3, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, -10, Tightening::LB ), Tightening( 9, 6, Tightening::UB ),
-
-            Tightening( 10, -3, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-            Tightening( 11, -15.5, Tightening::LB ), Tightening( 11, 11.5, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 7, -10, Tightening::LB ),
-            Tightening( 7, 5, Tightening::UB ),
-
-            Tightening( 11, -14, Tightening::LB ),
-            Tightening( 11, 11, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_gradient_round2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-gradient" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardRound2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-            Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
-            Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-
-            Tightening( 11, -11, Tightening::LB ), Tightening( 11, 15, Tightening::UB ),
-            Tightening( 12, -10, Tightening::LB ), Tightening( 12, 10, Tightening::UB ),
-            Tightening( 13, -7, Tightening::LB ), Tightening( 13, 3, Tightening::UB ),
-
-            Tightening( 14, -11, Tightening::LB ), Tightening( 14, 15, Tightening::UB ),
-            Tightening( 15, -10, Tightening::LB ), Tightening( 15, 10, Tightening::UB ),
-            Tightening( 16, -7, Tightening::LB ), Tightening( 16, 3, Tightening::UB ),
-
-            Tightening( 17, -27.5, Tightening::LB ), Tightening( 17, 27.5, Tightening::UB ),
-            Tightening( 18, -16.5, Tightening::LB ), Tightening( 18, 16.5, Tightening::UB ),
-
-            Tightening( 19, -28, Tightening::LB ), Tightening( 19, 28, Tightening::UB ),
-            Tightening( 20, -17, Tightening::LB ), Tightening( 20, 17, Tightening::UB ),
-
-            Tightening( 21, -38, Tightening::LB ), Tightening( 21, 36, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
-            Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-
-            Tightening( 11, -13, Tightening::LB ), Tightening( 11, 30, Tightening::UB ),
-            Tightening( 12, -23, Tightening::LB ), Tightening( 12, 12, Tightening::UB ),
-            Tightening( 13, -9, Tightening::LB ), Tightening( 13, 6, Tightening::UB ),
-
-            Tightening( 14, -13, Tightening::LB ), Tightening( 14, 30, Tightening::UB ),
-            Tightening( 15, -23, Tightening::LB ), Tightening( 15, 12, Tightening::UB ),
-            Tightening( 16, -9, Tightening::LB ), Tightening( 16, 6, Tightening::UB ),
-
-            Tightening( 17, -57.5, Tightening::LB ), Tightening( 17, 32.5, Tightening::UB ),
-            Tightening( 18, -23.5, Tightening::LB ), Tightening( 18, 26.5, Tightening::UB ),
-
-            Tightening( 19, -58, Tightening::LB ), Tightening( 19, 33, Tightening::UB ),
-            Tightening( 20, -24, Tightening::LB ), Tightening( 20, 27, Tightening::UB ),
-
-            Tightening( 21, -74, Tightening::LB ), Tightening( 21, 44, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_gradient_sign()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-gradient" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSign( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ),
-
-            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-
-            Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ),
-            Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ),
-
-            Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ),
-
-            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-
-            Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ),
-            Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_gradient_sign2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-gradient" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSign2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-            Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ),
-
-            Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ),
-            Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ),
-            Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ),
-
-            Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ),
-            Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ),
-            Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ),
-
-            Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ),
-            Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ),
-
-            Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ),
-            Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ),
-
-            Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-            Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ),
-
-            Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ),
-            Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ),
-            Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ),
-
-            Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ),
-            Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ),
-            Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ),
-
-            Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ),
-            Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ),
-
-            Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ),
-            Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ),
-
-            Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_gradient_leaky_relu()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-gradient" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardLeakyReLU( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
-
-            Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ),
-            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-
-            Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ),
-            Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 4, -0.1, Tightening::LB ),
-
-            Tightening( 8, -0.045, Tightening::LB ),
-            Tightening( 9, -0.3, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-
-            Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ),
-            Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ),
-
-            Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ),
-            Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ),
-
-            Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ),
-            Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 4, -0.5, Tightening::LB ),
-            Tightening( 5, -0.4, Tightening::LB ),
-
-            Tightening( 8, -0.4571, Tightening::LB ),
-            Tightening( 9, -1.1057, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_gradient_leaky_relu2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-gradient" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardLeakyRelu2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-            Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
-            Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-
-            Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ),
-            Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ),
-            Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ),
-
-            Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ),
-            Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ),
-            Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ),
-
-            Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ),
-            Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ),
-
-            Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ),
-            Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ),
-
-            Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 7, -0.2, Tightening::LB ),
-            Tightening( 8, -0.3, Tightening::LB ),
-            Tightening( 9, -0.3, Tightening::LB ),
-            Tightening( 10, -0.6, Tightening::LB ),
-
-            Tightening( 14, -0.9, Tightening::LB ),
-            Tightening( 15, -0.89, Tightening::LB ),
-            Tightening( 16, -0.77, Tightening::LB ),
-
-            Tightening( 19, -2.3133, Tightening::LB ),
-            Tightening( 20, -1.2, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
-            Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-
-            Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ),
-            Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ),
-            Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ),
-
-            Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ),
-            Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ),
-            Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ),
-
-            Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ),
-            Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ),
-
-            Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ),
-            Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ),
-
-            Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 7, -0.2, Tightening::LB ),
-            Tightening( 8, -0.5, Tightening::LB ),
-            Tightening( 9, -0.6, Tightening::LB ),
-            Tightening( 10, -1.5, Tightening::LB ),
-
-            Tightening( 14, -1.1, Tightening::LB ),
-            Tightening( 15, -2.1771, Tightening::LB ),
-            Tightening( 16, -1.15, Tightening::LB ),
-
-            Tightening( 19, -5.6259, Tightening::LB ),
-            Tightening( 20, -1.9, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_gradient_softmax_and_max()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-gradient" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSoftmaxAndMax( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke SBT
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
-
-            Tightening( 3, 0.2119, Tightening::LB ), Tightening( 3, 0.8756, Tightening::UB ),
-            Tightening( 5, 0.0049, Tightening::LB ), Tightening( 5, 0.6652, Tightening::UB ),
-            Tightening( 7, 0.0634, Tightening::LB ), Tightening( 7, 0.4955, Tightening::UB ),
-
-            Tightening( 8, -0.1870, Tightening::LB ), Tightening( 8, 1.4488, Tightening::UB ),
-            Tightening( 9, 0.6814, Tightening::LB ), Tightening( 9, 2.2432, Tightening::UB ),
-
-            Tightening( 10, 0.6814, Tightening::LB ), Tightening( 10, 2.2432, Tightening::UB ),
-
-            Tightening( 11, -2.2432, Tightening::LB ), Tightening( 11, -0.6814, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-
-            Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ),
-            Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ),
-
-            Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ),
-            Tightening( 9, 0.2194, Tightening::LB ), Tightening( 9, 2.9934, Tightening::UB ),
-
-            Tightening( 10, 0.2194, Tightening::LB ), Tightening( 10, 2.9934, Tightening::UB ),
-
-            Tightening( 11, -2.9934, Tightening::LB ), Tightening( 11, -0.2194, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_gradient_softmax_and_max2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-gradient" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSoftmaxAndMax2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, 0.0003, Tightening::LB ), Tightening( 7, 0.9864, Tightening::UB ),
-            Tightening( 8, 0.0001, Tightening::LB ), Tightening( 8, 0.9907, Tightening::UB ),
-            Tightening( 9, 0.0001, Tightening::LB ), Tightening( 9, 0.9907, Tightening::UB ),
-            Tightening( 10, 0.0001, Tightening::LB ), Tightening( 10, 0.9994, Tightening::UB ),
-
-            Tightening( 11, 1.0013, Tightening::LB ), Tightening( 11, 4.9634, Tightening::UB ),
-            Tightening( 12, -0.9861, Tightening::LB ), Tightening( 12, 2.9152, Tightening::UB ),
-            Tightening( 13, -2.9868, Tightening::LB ), Tightening( 13, 0.9678, Tightening::UB ),
-
-            Tightening( 14, 0.1143, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ),
-            Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8694, Tightening::UB ),
-            Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4596, Tightening::UB ),
-
-            Tightening( 17, 0.1143, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, 0.0001, Tightening::LB ), Tightening( 7, 0.9999, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 0.9991, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 0.9990, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9999, Tightening::UB ),
-
-            Tightening( 11, 1.0003, Tightening::LB ), Tightening( 11, 4.9326, Tightening::UB ),
-            Tightening( 12, -0.9999, Tightening::LB ), Tightening( 12, 2.9979, Tightening::UB ),
-            Tightening( 13, -2.9990, Tightening::LB ), Tightening( 13, 0.9980, Tightening::UB ),
-
-            Tightening( 14, 0.1067, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ),
-            Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8786, Tightening::UB ),
-            Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4677, Tightening::UB ),
-
-            Tightening( 17, 0.1067, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_gradient_relu_and_bilinear()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-gradient" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardReluAndBilinear( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
-
-            Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ),
-
-            Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ),
-
-            Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), - Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), - - Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), - - Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PMNR with gradient heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 7, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_pmnr_gradient_relu_and_bilinear2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-pmnr-gradient" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardReluAndBilinear2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), - - Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ), - Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ), - - Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, 
expectedBounds ) ); - - - // Invoke PMNR with gradient heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 13, 0, Tightening::LB ), - Tightening( 14, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), - Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), - - Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ), - Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ), - - Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PMNR with gradient heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 7, 0, Tightening::LB ), - - Tightening( 13, 0, Tightening::LB ), - Tightening( 14, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( 
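The tightenings asserted for variable 15 follow the endpoint-product rule for a bilinear node: assuming, as the first DeepPoly pass suggests, that variable 15 is the product of variables 13 and 14, x in [-4, 8] times y in [-2, 10] ranges over [-40, 80]. Below is a minimal standalone sketch of that rule; bilinearBounds is an illustrative helper, not part of this patch or of the NLR API.

#include <algorithm>
#include <cassert>

// Interval product: the tightest concrete bounds of z = x * y are the min and
// max over the four endpoint products.
static void bilinearBounds( double xl, double xu, double yl, double yu, double &zl, double &zu )
{
    double p[4] = { xl * yl, xl * yu, xu * yl, xu * yu };
    zl = *std::min_element( p, p + 4 );
    zu = *std::max_element( p, p + 4 );
}

int main()
{
    double zl, zu;
    bilinearBounds( -4, 8, -2, 10, zl, zu ); // intervals of variables 13 and 14 above
    assert( zl == -40 && zu == 80 );         // consistent with the bounds for variable 15
    return 0;
}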
-
-    void test_pmnr_bbps_relu()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-bbps" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardReLU( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
-
-            Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-
-            Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ),
-            Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 8, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-
-            Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ),
-            Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ),
-
-            Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ),
-
-            Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ),
-            Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 8, 0, Tightening::LB ),
-            Tightening( 9, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
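The concrete half of these ReLU tightenings is plain interval propagation through y = max( x, 0 ): x2 in [-1, 1] maps to x4 in [0, 1] in the first pass above. A minimal sketch follows, separate from the patch (reluBounds is an illustrative helper, not a Marabou function); the symbolic half of DeepPoly additionally carries the triangle relaxation y <= u * ( x - l ) / ( u - l ) for unstable neurons, which is roughly where fractional results such as the -4.0489 above originate.

#include <algorithm>
#include <cassert>

// Concrete ReLU bounds: clamp both interval endpoints at zero.
static void reluBounds( double l, double u, double &yl, double &yu )
{
    yl = std::max( l, 0.0 );
    yu = std::max( u, 0.0 );
}

int main()
{
    double yl, yu;
    reluBounds( -1, 1, yl, yu ); // interval of variable 2 above
    assert( yl == 0 && yu == 1 );
    return 0;
}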
-
-    void test_pmnr_bbps_relu2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-bbps" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardReLU2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-
-            Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ),
-            Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ),
-            Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ),
-
-            Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ),
-            Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ),
-            Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ),
-
-            Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ),
-            Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ),
-
-            Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ),
-            Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ),
-
-            Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 14, 0, Tightening::LB ),
-            Tightening( 15, 0, Tightening::LB ),
-            Tightening( 16, 0, Tightening::LB ),
-
-            Tightening( 19, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-
-            Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ),
-            Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ),
-            Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ),
-
-            Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ),
-            Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ),
-            Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ),
-
-            Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ),
-            Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ),
-
-            Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ),
-            Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ),
-
-            Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 7, 0, Tightening::LB ),
-
-            Tightening( 14, 0, Tightening::LB ),
-            Tightening( 15, 0, Tightening::LB ),
-            Tightening( 16, 0, Tightening::LB ),
-
-            Tightening( 20, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_bbps_sigmoid()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-bbps" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSigmoid( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, 0.2689, Tightening::LB ), Tightening( 4, 0.7311, Tightening::UB ),
-            Tightening( 5, 0.5, Tightening::LB ), Tightening( 5, 0.8808, Tightening::UB ),
-
-            Tightening( 6, -0.1261, Tightening::LB ), Tightening( 6, 0.5069, Tightening::UB ),
-            Tightening( 7, -0.2379, Tightening::LB ), Tightening( 7, 0.8571, Tightening::UB ),
-
-            Tightening( 8, 0.4685, Tightening::LB ), Tightening( 8, 0.6241, Tightening::UB ),
-            Tightening( 9, 0.4408, Tightening::LB ), Tightening( 9, 0.7021, Tightening::UB ),
-
-            Tightening( 10, -1.1819, Tightening::LB ), Tightening( 10, -1.0535, Tightening::UB ),
-            Tightening( 11, 3.4986, Tightening::LB ), Tightening( 11, 3.8797, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, 0.0066, Tightening::LB ), Tightening( 4, 0.8807, Tightening::UB ),
-            Tightening( 5, 0.0179, Tightening::LB ), Tightening( 5, 0.9526, Tightening::UB ),
-
-            Tightening( 6, -0.8362, Tightening::LB ), Tightening( 6, 0.9193, Tightening::UB ),
-            Tightening( 7, -0.8860, Tightening::LB ), Tightening( 7, 1.6904, Tightening::UB ),
-
-            Tightening( 8, 0.3023, Tightening::LB ), Tightening( 8, 0.7148, Tightening::UB ),
-            Tightening( 9, 0.2919, Tightening::LB ), Tightening( 9, 0.8443, Tightening::UB ),
-
-            Tightening( 10, -1.2694, Tightening::LB ), Tightening( 10, -0.8841, Tightening::UB ),
-            Tightening( 11, 3.2396, Tightening::LB ), Tightening( 11, 4.05, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
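Sigmoid is strictly increasing, so its concrete bounds map straight through the function: x in [l, u] gives sigma(x) in [sigma(l), sigma(u)]. With x2 in [-1, 1] that is approximately [0.2689, 0.7311], the interval asserted above for variable 4. A minimal sketch for checking such values by hand; not part of the patch.

#include <cassert>
#include <cmath>

// Logistic sigmoid.
static double sigmoid( double x )
{
    return 1.0 / ( 1.0 + std::exp( -x ) );
}

int main()
{
    // sigma( -1 ) ~ 0.2689 and sigma( 1 ) ~ 0.7311, matching variable 4 above.
    assert( std::fabs( sigmoid( -1 ) - 0.2689 ) < 0.0001 );
    assert( std::fabs( sigmoid( 1 ) - 0.7311 ) < 0.0001 );
    return 0;
}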
-
-    void test_pmnr_bbps_sigmoid2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-bbps" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSigmoid2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ),
-            Tightening( 8, 0.0474, Tightening::LB ), Tightening( 8, 0.9526, Tightening::UB ),
-            Tightening( 9, 0.0474, Tightening::LB ), Tightening( 9, 0.9526, Tightening::UB ),
-            Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9975, Tightening::UB ),
-
-            Tightening( 11, 1.3787, Tightening::LB ), Tightening( 11, 4.6213, Tightening::UB ),
-            Tightening( 12, -0.5636, Tightening::LB ), Tightening( 12, 2.5636, Tightening::UB ),
-            Tightening( 13, -2.3771, Tightening::LB ), Tightening( 13, 0.3771, Tightening::UB ),
-
-            Tightening( 14, 0.7988, Tightening::LB ), Tightening( 14, 0.9903, Tightening::UB ),
-            Tightening( 15, 0.3627, Tightening::LB ), Tightening( 15, 0.9285, Tightening::UB ),
-            Tightening( 16, 0.0849, Tightening::LB ), Tightening( 16, 0.5932, Tightening::UB ),
-
-            Tightening( 17, -1.2113, Tightening::LB ), Tightening( 17, 0.0354, Tightening::UB ),
-            Tightening( 18, 1.4027, Tightening::LB ), Tightening( 18, 2.4177, Tightening::UB ),
-
-            Tightening( 19, 0.2295, Tightening::LB ), Tightening( 19, 0.5088, Tightening::UB ),
-            Tightening( 20, 0.8026, Tightening::LB ), Tightening( 20, 0.9182, Tightening::UB ),
-
-            Tightening( 21, -1.6539, Tightening::LB ), Tightening( 21, -1.3393, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.9933, Tightening::UB ),
-            Tightening( 8, 0.0067, Tightening::LB ), Tightening( 8, 0.9933, Tightening::UB ),
-            Tightening( 9, 0.0025, Tightening::LB ), Tightening( 9, 0.9933, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9991, Tightening::UB ),
-
-            Tightening( 11, 1.2517, Tightening::LB ), Tightening( 11, 4.9701, Tightening::UB ),
-            Tightening( 12, -0.9599, Tightening::LB ), Tightening( 12, 2.8466, Tightening::UB ),
-            Tightening( 13, -2.8147, Tightening::LB ), Tightening( 13, 0.9188, Tightening::UB ),
-
-            Tightening( 14, 0.7776, Tightening::LB ), Tightening( 14, 0.9931, Tightening::UB ),
-            Tightening( 15, 0.2769, Tightening::LB ), Tightening( 15, 0.9451, Tightening::UB ),
-            Tightening( 16, 0.0565, Tightening::LB ), Tightening( 16, 0.7148, Tightening::UB ),
-
-            Tightening( 17, -1.4307, Tightening::LB ), Tightening( 17, 0.1083, Tightening::UB ),
-            Tightening( 18, 1.2592, Tightening::LB ), Tightening( 18, 2.5519, Tightening::UB ),
-
-            Tightening( 19, 0.1929, Tightening::LB ), Tightening( 19, 0.5270, Tightening::UB ),
-            Tightening( 20, 0.7789, Tightening::LB ), Tightening( 20, 0.9277, Tightening::UB ),
-
-            Tightening( 21, -1.6967, Tightening::LB ), Tightening( 21, -1.3115, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_bbps_abs()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-bbps" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardAbs( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
-
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ),
-
-            Tightening( 10, -4, Tightening::LB ), Tightening( 10, 0, Tightening::UB ),
-            Tightening( 11, 2, Tightening::LB ), Tightening( 11, 8, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, 0, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 4, Tightening::UB ),
-
-            Tightening( 6, -5, Tightening::LB ), Tightening( 6, 4, Tightening::UB ),
-            Tightening( 7, -4, Tightening::LB ), Tightening( 7, 10, Tightening::UB ),
-
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 10, Tightening::UB ),
-
-            Tightening( 10, -15, Tightening::LB ), Tightening( 10, 0, Tightening::UB ),
-            Tightening( 11, 2, Tightening::LB ), Tightening( 11, 27, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
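Concrete bounds for y = |x| depend on whether the input interval straddles zero: if it does, the lower bound collapses to 0 and the upper bound is the larger endpoint magnitude. With x2 in [-5, 2] this gives x4 in [0, 5], as the second DeepPoly pass above asserts. A minimal sketch; absBounds is an illustrative helper, not part of the patch.

#include <algorithm>
#include <cassert>

// Concrete absolute-value bounds for x in [l, u].
static void absBounds( double l, double u, double &yl, double &yu )
{
    if ( l >= 0 )
    {
        yl = l;
        yu = u;
    }
    else if ( u <= 0 )
    {
        yl = -u;
        yu = -l;
    }
    else
    {
        yl = 0;
        yu = std::max( -l, u );
    }
}

int main()
{
    double yl, yu;
    absBounds( -5, 2, yl, yu ); // interval of variable 2 above
    assert( yl == 0 && yu == 5 );
    return 0;
}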
-
-    void test_pmnr_bbps_abs2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-bbps" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardAbs2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-
-            Tightening( 11, -4, Tightening::LB ), Tightening( 11, 9, Tightening::UB ),
-            Tightening( 12, -2, Tightening::LB ), Tightening( 12, 12, Tightening::UB ),
-            Tightening( 13, -5, Tightening::LB ), Tightening( 13, 6, Tightening::UB ),
-
-            Tightening( 14, 0, Tightening::LB ), Tightening( 14, 9, Tightening::UB ),
-            Tightening( 15, 0, Tightening::LB ), Tightening( 15, 12, Tightening::UB ),
-            Tightening( 16, 0, Tightening::LB ), Tightening( 16, 6, Tightening::UB ),
-
-            Tightening( 17, -15, Tightening::LB ), Tightening( 17, 12, Tightening::UB ),
-            Tightening( 18, 0, Tightening::LB ), Tightening( 18, 27, Tightening::UB ),
-
-            Tightening( 19, 0, Tightening::LB ), Tightening( 19, 15, Tightening::UB ),
-            Tightening( 20, 0, Tightening::LB ), Tightening( 20, 27, Tightening::UB ),
-
-            Tightening( 21, -28, Tightening::LB ), Tightening( 21, 14, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 6, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 15, Tightening::UB ),
-
-            Tightening( 11, -13, Tightening::LB ), Tightening( 11, 18, Tightening::UB ),
-            Tightening( 12, -5, Tightening::LB ), Tightening( 12, 25, Tightening::UB ),
-            Tightening( 13, -7, Tightening::LB ), Tightening( 13, 15, Tightening::UB ),
-
-            Tightening( 14, 0, Tightening::LB ), Tightening( 14, 18, Tightening::UB ),
-            Tightening( 15, 0, Tightening::LB ), Tightening( 15, 25, Tightening::UB ),
-            Tightening( 16, 0, Tightening::LB ), Tightening( 16, 15, Tightening::UB ),
-
-            Tightening( 17, -33, Tightening::LB ), Tightening( 17, 25, Tightening::UB ),
-            Tightening( 18, 0, Tightening::LB ), Tightening( 18, 58, Tightening::UB ),
-
-            Tightening( 19, 0, Tightening::LB ), Tightening( 19, 33, Tightening::UB ),
-            Tightening( 20, 0, Tightening::LB ), Tightening( 20, 58, Tightening::UB ),
-
-            Tightening( 21, -59, Tightening::LB ), Tightening( 21, 32, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_bbps_round()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-bbps" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardRound( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
-
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ),
-            Tightening( 7, -4, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-
-            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, -4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ),
-
-            Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ),
-            Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 6.5, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 11, -4, Tightening::LB ),
-            Tightening( 11, 6, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-
-            Tightening( 6, -3, Tightening::LB ), Tightening( 6, 5, Tightening::UB ),
-            Tightening( 7, -10.5, Tightening::LB ), Tightening( 7, 5.5, Tightening::UB ),
-
-            Tightening( 8, -3, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, -10, Tightening::LB ), Tightening( 9, 6, Tightening::UB ),
-
-            Tightening( 10, -3, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-            Tightening( 11, -15.5, Tightening::LB ), Tightening( 11, 11.5, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 7, -10, Tightening::LB ),
-            Tightening( 7, 5, Tightening::UB ),
-
-            Tightening( 11, -14, Tightening::LB ),
-            Tightening( 11, 11, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
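Rounding is monotone, so y = round( x ) with x in [l, u] maps to [round( l ), round( u )]. The expected values above are consistent with round-half-to-even: x7 in [-10.5, 5.5] tightens to x9 in [-10, 6] in the second pass. A minimal sketch, assuming that tie-breaking convention (the assertions above are consistent with it, but the test itself does not spell it out).

#include <cassert>
#include <cfenv>
#include <cmath>

int main()
{
    std::fesetround( FE_TONEAREST ); // nearest, ties to even
    assert( std::nearbyint( -10.5 ) == -10.0 );
    assert( std::nearbyint( 5.5 ) == 6.0 );
    return 0;
}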
-
-    void test_pmnr_bbps_round2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-bbps" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardRound2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-            Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
-            Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-
-            Tightening( 11, -11, Tightening::LB ), Tightening( 11, 15, Tightening::UB ),
-            Tightening( 12, -10, Tightening::LB ), Tightening( 12, 10, Tightening::UB ),
-            Tightening( 13, -7, Tightening::LB ), Tightening( 13, 3, Tightening::UB ),
-
-            Tightening( 14, -11, Tightening::LB ), Tightening( 14, 15, Tightening::UB ),
-            Tightening( 15, -10, Tightening::LB ), Tightening( 15, 10, Tightening::UB ),
-            Tightening( 16, -7, Tightening::LB ), Tightening( 16, 3, Tightening::UB ),
-
-            Tightening( 17, -27.5, Tightening::LB ), Tightening( 17, 27.5, Tightening::UB ),
-            Tightening( 18, -16.5, Tightening::LB ), Tightening( 18, 16.5, Tightening::UB ),
-
-            Tightening( 19, -28, Tightening::LB ), Tightening( 19, 28, Tightening::UB ),
-            Tightening( 20, -17, Tightening::LB ), Tightening( 20, 17, Tightening::UB ),
-
-            Tightening( 21, -38, Tightening::LB ), Tightening( 21, 36, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
-            Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-
-            Tightening( 11, -13, Tightening::LB ), Tightening( 11, 30, Tightening::UB ),
-            Tightening( 12, -23, Tightening::LB ), Tightening( 12, 12, Tightening::UB ),
-            Tightening( 13, -9, Tightening::LB ), Tightening( 13, 6, Tightening::UB ),
-
-            Tightening( 14, -13, Tightening::LB ), Tightening( 14, 30, Tightening::UB ),
-            Tightening( 15, -23, Tightening::LB ), Tightening( 15, 12, Tightening::UB ),
-            Tightening( 16, -9, Tightening::LB ), Tightening( 16, 6, Tightening::UB ),
-
-            Tightening( 17, -57.5, Tightening::LB ), Tightening( 17, 32.5, Tightening::UB ),
-            Tightening( 18, -23.5, Tightening::LB ), Tightening( 18, 26.5, Tightening::UB ),
-
-            Tightening( 19, -58, Tightening::LB ), Tightening( 19, 33, Tightening::UB ),
-            Tightening( 20, -24, Tightening::LB ), Tightening( 20, 27, Tightening::UB ),
-
-            Tightening( 21, -74, Tightening::LB ), Tightening( 21, 44, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_bbps_sign()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-bbps" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSign( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ),
-
-            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-
-            Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ),
-            Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ),
-
-            Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ),
-
-            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-
-            Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ),
-            Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
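The sign bounds above follow the convention sign( x ) = +1 for x >= 0 and -1 otherwise: x3 in [0, 2] pins x5 to [1, 1], while x2 in [-1, 1] leaves x4 at [-1, 1]. A minimal sketch under that assumed convention; signBounds is an illustrative helper, not part of the patch.

#include <cassert>

// Concrete sign bounds for x in [l, u], with sign( 0 ) taken as +1.
static void signBounds( double l, double u, double &yl, double &yu )
{
    yl = ( l >= 0 ) ? 1 : -1;
    yu = ( u >= 0 ) ? 1 : -1;
}

int main()
{
    double yl, yu;
    signBounds( 0, 2, yl, yu );
    assert( yl == 1 && yu == 1 );
    signBounds( -1, 1, yl, yu );
    assert( yl == -1 && yu == 1 );
    return 0;
}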
-
-    void test_pmnr_bbps_sign2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-bbps" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSign2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-            Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ),
-
-            Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ),
-            Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ),
-            Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ),
-
-            Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ),
-            Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ),
-            Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ),
-
-            Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ),
-            Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ),
-
-            Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ),
-            Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ),
-
-            Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-            Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ),
-
-            Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ),
-            Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ),
-            Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ),
-
-            Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ),
-            Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ),
-            Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ),
-
-            Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ),
-            Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ),
-
-            Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ),
-            Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ),
-
-            Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_bbps_leaky_relu()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-bbps" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardLeakyReLU( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
-
-            Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ),
-            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-
-            Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ),
-            Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 4, -0.1, Tightening::LB ),
-
-            Tightening( 8, -0.045, Tightening::LB ),
-            Tightening( 9, -0.3, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-
-            Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ),
-            Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ),
-
-            Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ),
-            Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ),
-
-            Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ),
-            Tightening( 11,
-14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PMNR with BBPS heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 4, -0.5, Tightening::LB ), - Tightening( 5, -0.4, Tightening::LB ), - - Tightening( 8, -0.4571, Tightening::LB ), - Tightening( 9, -1.1057, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_pmnr_bbps_leaky_relu2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-pmnr-bbps" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardLeakyRelu2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ), - Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ), - Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ), - - Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ), - Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ), - Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ), - - Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ), - Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ), - - Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ), - Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ), - - Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PMNR with BBPS heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 7, -0.2, Tightening::LB ), - Tightening( 8, -0.3, Tightening::LB ), - 
Tightening( 9, -0.3, Tightening::LB ), - Tightening( 10, -0.6, Tightening::LB ), - - Tightening( 14, -0.9, Tightening::LB ), - Tightening( 15, -0.89, Tightening::LB ), - Tightening( 16, -0.77, Tightening::LB ), - - Tightening( 19, -2.3133, Tightening::LB ), - Tightening( 20, -1.2, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ), - Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ), - Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ), - - Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ), - Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ), - Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ), - - Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ), - Tightening( 18, -19, 
Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ), - - Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ), - Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ), - - Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PMNR with BBPS heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 7, -0.2, Tightening::LB ), - Tightening( 8, -0.5, Tightening::LB ), - Tightening( 9, -0.6, Tightening::LB ), - Tightening( 10, -1.5, Tightening::LB ), - - Tightening( 14, -1.1, Tightening::LB ), - Tightening( 15, -2.1771, Tightening::LB ), - Tightening( 16, -1.15, Tightening::LB ), - - Tightening( 19, -5.6259, Tightening::LB ), - Tightening( 20, -1.9, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_pmnr_bbps_softmax_and_max() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-pmnr-bbps" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSoftmaxAndMax( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - - Tightening( 3, 0.2119, Tightening::LB ), Tightening( 3, 0.8756, Tightening::UB ), - Tightening( 5, 0.0049, Tightening::LB ), Tightening( 5, 0.6652, Tightening::UB ), - Tightening( 7, 0.0634, Tightening::LB ), Tightening( 7, 0.4955, Tightening::UB ), - - Tightening( 8, -0.1870, Tightening::LB ), Tightening( 8, 1.4488, Tightening::UB ), - Tightening( 9, 0.6814, Tightening::LB ), Tightening( 9, 2.2432, Tightening::UB ), - - Tightening( 10, 0.6814, Tightening::LB ), Tightening( 10, 2.2432, Tightening::UB ), - - Tightening( 11, -2.2432, Tightening::LB ), Tightening( 11, -0.6814, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PMNR with BBPS heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 
1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), - - Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), - Tightening( 9, 0.2194, Tightening::LB ), Tightening( 9, 2.9934, Tightening::UB ), - - Tightening( 10, 0.2194, Tightening::LB ), Tightening( 10, 2.9934, Tightening::UB ), - - Tightening( 11, -2.9934, Tightening::LB ), Tightening( 11, -0.2194, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PMNR with BBPS heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_pmnr_bbps_softmax_and_max2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-pmnr-bbps" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSoftmaxAndMax2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0.0003, Tightening::LB ), Tightening( 7, 0.9864, Tightening::UB ), - Tightening( 8, 0.0001, Tightening::LB ), Tightening( 8, 0.9907, 
Tightening::UB ), - Tightening( 9, 0.0001, Tightening::LB ), Tightening( 9, 0.9907, Tightening::UB ), - Tightening( 10, 0.0001, Tightening::LB ), Tightening( 10, 0.9994, Tightening::UB ), - - Tightening( 11, 1.0013, Tightening::LB ), Tightening( 11, 4.9634, Tightening::UB ), - Tightening( 12, -0.9861, Tightening::LB ), Tightening( 12, 2.9152, Tightening::UB ), - Tightening( 13, -2.9868, Tightening::LB ), Tightening( 13, 0.9678, Tightening::UB ), - - Tightening( 14, 0.1143, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ), - Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8694, Tightening::UB ), - Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4596, Tightening::UB ), - - Tightening( 17, 0.1143, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PMNR with BBPS heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, 0.0001, Tightening::LB ), Tightening( 7, 0.9999, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 0.9991, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 0.9990, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9999, Tightening::UB ), - - 
Tightening( 11, 1.0003, Tightening::LB ), Tightening( 11, 4.9326, Tightening::UB ), - Tightening( 12, -0.9999, Tightening::LB ), Tightening( 12, 2.9979, Tightening::UB ), - Tightening( 13, -2.9990, Tightening::LB ), Tightening( 13, 0.9980, Tightening::UB ), - - Tightening( 14, 0.1067, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ), - Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8786, Tightening::UB ), - Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4677, Tightening::UB ), - - Tightening( 17, 0.1067, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PMNR with BBPS heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_pmnr_bbps_relu_and_bilinear() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-pmnr-bbps" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardReluAndBilinear( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - - Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ), - - Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ), - - Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PMNR with BBPS heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - 
tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), - Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), - - Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), - - Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PMNR with BBPS heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 7, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_pmnr_bbps_relu_and_bilinear2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-pmnr-bbps" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardReluAndBilinear2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), - - Tightening( 13, -4, Tightening::LB ), 
Tightening( 13, 8, Tightening::UB ), - Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ), - - Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PMNR with BBPS heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 13, 0, Tightening::LB ), - Tightening( 14, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), - Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), - - Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ), - Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ), - - Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PMNR with BBPS heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - 
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 7, 0, Tightening::LB ),
-
-            Tightening( 13, 0, Tightening::LB ),
-            Tightening( 14, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    bool boundsEqual( const List<Tightening> &bounds, const List<Tightening> &expectedBounds )
-    {
-        if ( bounds.size() < expectedBounds.size() )
-            return false;
-
-        bool allFound = true;
-        for ( const auto &bound : bounds )
-        {
-            bool currentFound = false;
-            for ( const auto &expectedBound : expectedBounds )
-            {
-                currentFound |=
-                    ( bound._type == expectedBound._type &&
-                      bound._variable == expectedBound._variable &&
-                      FloatUtils::areEqual( bound._value, expectedBound._value, 0.0001 ) );
-            }
-            allFound &= currentFound;
-        }
-        return allFound;
-    }
-
-    // Create a list of all tightenings in newBounds for which there is no bound, in newBounds
-    // or in bounds, that is at least as tight.
-    List<Tightening> removeRedundancies( const List<Tightening> &bounds,
-                                         const List<Tightening> &newBounds )
-    {
-        List<Tightening> minimalBounds;
-
-        for ( const auto &newBound : newBounds )
-        {
-            bool foundTighter = false;
-            for ( const auto &bound : bounds )
-            {
-                foundTighter |=
-                    ( newBound._type == bound._type && newBound._variable == bound._variable &&
-                      ( ( newBound._type == Tightening::LB &&
-                          FloatUtils::lte( newBound._value, bound._value, 0.0001 ) ) ||
-                        ( newBound._type == Tightening::UB &&
-                          FloatUtils::gte( newBound._value, bound._value, 0.0001 ) ) ) );
-            }
-
-            for ( const auto &bound : newBounds )
-            {
-                foundTighter |=
-                    ( newBound._type == bound._type && newBound._variable == bound._variable &&
-                      ( ( newBound._type == Tightening::LB &&
-                          FloatUtils::lt( newBound._value, bound._value, 0.0001 ) ) ||
-                        ( newBound._type == Tightening::UB &&
-                          FloatUtils::gt( newBound._value, bound._value, 0.0001 ) ) ) );
-            }
-
-            if ( !foundTighter )
-                minimalBounds.append( newBound );
-        }
-        return minimalBounds;
-    }
-
-    void updateTableau( MockTableau &tableau, List<Tightening> &tightenings )
-    {
-        for ( const auto &tightening : tightenings )
-        {
-            if ( tightening._type == Tightening::LB )
-            {
-                tableau.setLowerBound( tightening._variable, tightening._value );
-            }
-
-            if ( tightening._type == Tightening::UB )
-            {
-                tableau.setUpperBound( tightening._variable, tightening._value );
-            }
-        }
-    }
-};
diff --git a/src/nlr/tests/Test_PMNRSelection.h b/src/nlr/tests/Test_PMNRSelection.h
new file mode 100644
index 0000000000..d771680107
--- /dev/null
+++ b/src/nlr/tests/Test_PMNRSelection.h
@@ -0,0 +1,8821 @@
+/********************* */
+/*! \file Test_PMNRSelection.h
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Guy Katz, Andrew Wu
+ ** This file is part of the Marabou project.
+ ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS
+ ** in the top-level source directory) and their institutional affiliations.
+ ** All rights reserved.
See the file COPYING in the top-level source + ** directory for licensing information.\endverbatim + ** + ** [[ Add lengthier description here ]] + +**/ + +#include "../../engine/tests/MockTableau.h" // TODO: fix this +#include "FloatUtils.h" +#include "Layer.h" +#include "NetworkLevelReasoner.h" +#include "Options.h" +#include "Query.h" +#include "Tightening.h" +#include "Vector.h" + +#include + +class MockForNetworkLevelReasoner +{ +public: +}; + +class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite +{ +public: + MockForNetworkLevelReasoner *mock; + + void setUp() + { + TS_ASSERT( mock = new MockForNetworkLevelReasoner ); + } + + void tearDown() + { + TS_ASSERT_THROWS_NOTHING( delete mock ); + } + + void populateNetworkSBTRelu( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + 2 R 1 + x0 --- x2 ---> x4 --- x6 + \ / / + 1 \ / / + \/ -1 / + /\ / + 3 / \ / + / \ R / + x1 --- x3 ---> x5 + 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 7 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + } + + void populateNetworkSBTReluResidual1( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + -1 + __________________ + / \ + / 1 R -1 1 R 3 1 + x0 --- x1 ---> x2 --- x3 ---> x4 --- x5 + \ / + \ 3 / + \________________________/ + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 1 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 2, NLR::Layer::RELU, 1 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 4, NLR::Layer::RELU, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + nlr.addLayerDependency( 0, 3 ); + nlr.addLayerDependency( 1, 5 ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 4, 0, 5, 0, 3 ); + nlr.setWeight( 0, 0, 3, 0, -1 ); + nlr.setWeight( 1, 0, 5, 0, 3 ); + + + nlr.setBias( 3, 0, 1 ); + nlr.setBias( 5, 0, 1 ); + + // Mark the ReLU sources + 
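+        // (addActivationSource wires each activation neuron to the weighted-sum
+        //  neuron it reads from; the residual edges in the diagram above are
+        //  plain layer dependencies with their own weights instead.
+        //  Sanity check, x0 = 1: x1 = 1, x2 = relu(1) = 1, x3 = -x2 - x0 + 1 = -1,
+        //  x4 = relu(-1) = 0, x5 = 3*x4 + 3*x1 + 1 = 4.)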
nlr.addActivationSource( 1, 0, 2, 0 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 5 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 6 ); + tableau.setLowerBound( 1, -large ); + tableau.setUpperBound( 1, large ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + } + + void populateNetworkSBTReluResidual2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + -1 + __________________ + / \ + / 1 R -1 1 R 3 1 1 + x0 --- x1 ---> x2 --- x3 ---> x4 --- x5 --- x6 + \ / + \ 1 / + \_______________________________/ + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 1 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 2, NLR::Layer::RELU, 1 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 4, NLR::Layer::RELU, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 6, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 6; ++i ) + nlr.addLayerDependency( i - 1, i ); + nlr.addLayerDependency( 0, 3 ); + nlr.addLayerDependency( 0, 5 ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 4, 0, 5, 0, 3 ); + nlr.setWeight( 0, 0, 3, 0, -1 ); + nlr.setWeight( 0, 0, 5, 0, 1 ); + nlr.setWeight( 5, 0, 6, 0, 1 ); + + nlr.setBias( 3, 0, 1 ); + nlr.setBias( 5, 0, 1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 7 ); + tableau.setLowerBound( 1, -large ); + tableau.setUpperBound( 1, large ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + } + + void populateNetworkSBTReluReindex( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 1 1 1 + x0 --- x2 x5 --- x6 x9 --- x10 + \ /\ /\ / \ / \ / + 1 \ / R\ /-1\ / R \ / 1 \ / + \/ \/ \/ \/ \/ + /\ /\ /\ /\ /\ + 1 / \ R/ \ 1/ \ R / \ 1 / \ + / \/ \/ \ / \ / 0 \ + x1 --- x3 x4 --- x7 x8 --- x11 + -1 1 + + The example described in Fig. 
3 of + https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 0 ); + + nlr.setBias( 5, 0, 1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + + nlr.addActivationSource( 3, 0, 4, 1 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 8 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkSBTLeakyReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 LR 1 LR 1 1 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 1 \ / 0 \ / + \/ \/ \/ + /\ /\ /\ + 1 / \ 1 / \ 1 / \ + / \ LR / \ LR / 1 \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + -1 -1 + + The example described in Fig. 
3 of + https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + using LeakyReLU activation instead of ReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + nlr.getLayer( 2 )->setAlpha( 0.2 ); + nlr.getLayer( 4 )->setAlpha( 0.2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, 1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 0 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + + nlr.setBias( 5, 0, 1 ); + + // Mark the LeakyReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkSBTSigmoidsAndRound( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 S 1 Rd + x0 --- x2 ---> x4 --- x6 --- x8 + \ / \ / + 1 \ / 1 \ / + \/ \/ + /\ /\ + 1 / \ 1 / \ + / \ S / \ Rd + x1 --- x3 ---> x5 --- x7 --- x9 + -1 -1 + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 4; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // 
Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, 1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + // Mark the Sigmoid sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 10 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + } + + void populateNetworkSBTMax( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 R Max 2 + x0 --- x2 ---> x4 --- x6 ---> x7 + \ / / + 1 \ / / + \/ / + /\ / + 1 / \ / + / \ R / + x1 --- x3 ---> x5 + -1 + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::MAX, 1 ); + nlr.addLayer( 4, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 4; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); + nlr.setWeight( 3, 0, 4, 0, 2 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Mark the Max sources + nlr.addActivationSource( 2, 0, 3, 0 ); + nlr.addActivationSource( 2, 1, 3, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 7 ); + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 8 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, 
large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + } + + void populateNetworkSBTSoftmax( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + + x0 x3 S x6 + + x1 x4 S x7 + + x2 x5 S x8 + + x3 = x0 - x1 + x2 + 1 + x4 = -x0 + x1 + x2 + 2 + x5 = -x0 - x1 - x2 + 3 + + x6 x7 x8 = softmax(x3, x4, x5) + + x9 = x6 + x7 + x8 + x10 = - x6 - x7 - x8 + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, -1 ); + nlr.setWeight( 0, 0, 1, 2, -1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 2, -1 ); + nlr.setWeight( 0, 2, 1, 0, 1 ); + nlr.setWeight( 0, 2, 1, 1, 1 ); + nlr.setWeight( 0, 2, 1, 2, -1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 1, 1, 2 ); + nlr.setBias( 1, 2, 3 ); + + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 8 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 11 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + } + + void populateNetworkSBTSoftmax2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + + x0 x3 S x8 + + x1 
x4 S x9 + + x2 x5 S x10 + + x6 S x11 + + x7 S x12 + + x3 = x0 - x1 + x2 + 1 + x4 = -x0 + x1 + x2 + 2 + x5 = -x0 - x1 - x2 + 3 + x6 = -x0 - x1 - x2 + 2 + x7 = -x0 - x1 - x2 + 1 + + x8 x10 x12 = softmax(x3, x5, x7) + + x9 x11 = softmax(x4, x6) + + x13 = x8 + x10 + x12 + x14 = -x8 - x10 - x12 + x15 = x9 + x11 + x16 = -x9 - x11 + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 5 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 5 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 4 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, -1 ); + nlr.setWeight( 0, 0, 1, 2, -1 ); + nlr.setWeight( 0, 0, 1, 3, -1 ); + nlr.setWeight( 0, 0, 1, 4, -1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 2, -1 ); + nlr.setWeight( 0, 1, 1, 3, -1 ); + nlr.setWeight( 0, 1, 1, 4, -1 ); + nlr.setWeight( 0, 2, 1, 0, 1 ); + nlr.setWeight( 0, 2, 1, 1, 1 ); + nlr.setWeight( 0, 2, 1, 2, -1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + nlr.setWeight( 0, 2, 1, 4, -1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 4, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + nlr.setWeight( 2, 4, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 2, 1 ); + nlr.setWeight( 2, 3, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 3, -1 ); + nlr.setWeight( 2, 3, 3, 3, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 1, 1, 2 ); + nlr.setBias( 1, 2, 3 ); + nlr.setBias( 1, 3, 2 ); + nlr.setBias( 1, 4, 1 ); + + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 4, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 4, 2, 2 ); + nlr.addActivationSource( 1, 0, 2, 4 ); + nlr.addActivationSource( 1, 2, 2, 4 ); + nlr.addActivationSource( 1, 4, 2, 4 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 3, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 3 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 4 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 4 ), 12 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 13 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 3 ), 16 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 17 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + 
tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+        tableau.setLowerBound( 7, -large );
+        tableau.setUpperBound( 7, large );
+        tableau.setLowerBound( 8, -large );
+        tableau.setUpperBound( 8, large );
+        tableau.setLowerBound( 9, -large );
+        tableau.setUpperBound( 9, large );
+        tableau.setLowerBound( 10, -large );
+        tableau.setUpperBound( 10, large );
+        tableau.setLowerBound( 11, -large );
+        tableau.setUpperBound( 11, large );
+        tableau.setLowerBound( 12, -large );
+        tableau.setUpperBound( 12, large );
+        tableau.setLowerBound( 13, -large );
+        tableau.setUpperBound( 13, large );
+        tableau.setLowerBound( 14, -large );
+        tableau.setUpperBound( 14, large );
+        tableau.setLowerBound( 15, -large );
+        tableau.setUpperBound( 15, large );
+        tableau.setLowerBound( 16, -large );
+        tableau.setUpperBound( 16, large );
+    }
+
+    void populateNetworkSBTBilinear( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
+    {
+        /*
+
+          x0    x2
+                    x    x4 -- x5
+          x1    x3
+
+          x2 = x0 - 2 * x1
+          x3 = x0 + x1
+
+          x4 = x2 * x3
+          x5 = -x4
+        */
+
+        // Create the layers
+        nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
+        nlr.addLayer( 2, NLR::Layer::BILINEAR, 1 );
+        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );
+
+        // Mark layer dependencies
+        for ( unsigned i = 1; i <= 3; ++i )
+            nlr.addLayerDependency( i - 1, i );
+
+        // Set the weights and biases for the weighted sum layers
+        nlr.setWeight( 0, 0, 1, 0, 1 );
+        nlr.setWeight( 0, 0, 1, 1, 1 );
+        nlr.setWeight( 0, 1, 1, 0, -2 );
+        nlr.setWeight( 0, 1, 1, 1, 1 );
+        nlr.setWeight( 2, 0, 3, 0, -1 );
+
+        nlr.addActivationSource( 1, 0, 2, 0 );
+        nlr.addActivationSource( 1, 1, 2, 0 );
+
+        // Variable indexing
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 5 );
+
+        // Very loose bounds for neurons except inputs
+        double large = 1000000;
+
+        tableau.getBoundManager().initialize( 6 );
+        tableau.setLowerBound( 2, -large );
+        tableau.setUpperBound( 2, large );
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+    }
+
+    void test_symbolic_bound_maps_relus_all_active()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTRelu( nlr, tableau );
+
+        tableau.setLowerBound( 0, 4 );
+        tableau.setUpperBound( 0, 6 );
+        tableau.setLowerBound( 1, 1 );
+        tableau.setUpperBound( 1, 5 );
+
+        // Invoke initializeSymbolicBoundsMaps
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() );
+
+        /*
+          Input ranges:
+
+          x0: [4, 6]
+          x1: [1, 5]
+
+          Layers 1, 2:
+
+          x2 = 2x0 + 3x1
+          x2.lb = 2x0 + 3x1 : [11, 27]
+          x2.ub = 2x0 + 3x1 : [11, 27]
+
+          x3 = x0 + x1
+          x3.lb = x0 + x1 : [5, 11]
+          x3.ub = x0 + x1 : [5, 11]
+
+          Both ReLUs active, bounds survive through the activations:
+
+          x2 <= x4 <= x2
+          x4.lb = 2x0 + 3x1 : [11, 27]
+          x4.ub = 2x0 + 3x1 : [11, 27]
+
+          x3 <= x5 <= x3
+          x5.lb = x0 + x1 : [5, 11]
+          x5.ub = x0 + x1 : [5, 11]
+
+          Layer 3:
+
+          x6 = x4 - x5
+          =>
x2 - x3 <= x6 <= x2 - x3 + x6.lb = x0 + 2x1 : [6, 16] + x6.ub = x0 + 2x1 : [6, 16] + */ + + List expectedBounds( { + Tightening( 2, 11, Tightening::LB ), + Tightening( 2, 27, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 11, Tightening::LB ), + Tightening( 4, 27, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, 6, Tightening::LB ), + Tightening( 6, 16, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (RELU): + x2 <= x4 <= x2 + x3 <= x5 <= x3 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x6 <= x6 <= x6 + + Layer 2: + Using x6 = x5 - x4: + x4 - x5 <= x6 <= x4 - x5 + + Layer 1: + Using x2 <= x4 <= x2, x3 <= x5 <= x3: + x2 - x3 <= x6 <= x2 - x3 + + Layer 0: + Using x2 = 2x0 + 3x1, x3 = x0 + x1: + x0 + 2x1 <= x6 <= x0 + 2x1 + */ + comparePredecessorLayerSymbolicBounds( nlr, + 2, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + compareOutputLayerSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 1, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 0, + Vector( { 1, 2 } ), + Vector( { 1, 2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + } + + void test_symbolic_bound_maps_relus_active_and_inactive() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); + + // Invoke initializeSymbolicBoundsMaps + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = 2x0 + 3x1 - 30 + x2.lb = 2x0 + 3x1 - 30 : [-19, -3] + x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is inactive, bounds get zeroed + Second ReLU is active, bounds surive the activation + + 0 <= x4 <= 0 + x4.lb = 0 + x4.ub = 0 + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + ==> -x3 <= x6 <= -x3 + x6.lb = -x0 - x1 : [-11, -5] + x6.ub = -x0 - x1 : [-11, -5] + */ + + List expectedBounds( { + Tightening( 2, -19, Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + 
TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (RELU): + 0 <= x4 <= 0 + x3 <= x5 <= x3 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x6 <= x6 <= x6 + + Layer 2: + Using x6 = x5 - x4: + x4 - x5 <= x6 <= x4 - x5 + + Layer 1: + Using x2 <= x4 <= x2, x3 <= x5 <= x3: + -x3 <= x6 <= -x3 + + Layer 0: + Using x3 = x0 + x1: + -x0 - x1 <= x6 <= -x0 - x1 + */ + comparePredecessorLayerSymbolicBounds( nlr, + 2, + Vector( { 0, 0, 0, 1 } ), + Vector( { 0, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + compareOutputLayerSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 1, + Vector( { 0, -1 } ), + Vector( { 0, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 0, + Vector( { -1, -1 } ), + Vector( { -1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + } + + void test_symbolic_bound_maps_relus_active_and_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + // Invoke initializeSymbolicBoundsMaps + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = 2x0 + 3x1 - 15 + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is undecided, bound is concretized. 12 = ub > -lb = 4, using ReLU lower + coefficient of 1. 
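+          (In general, for an undecided ReLU over [lb, ub] this relaxation takes the
+          identity as the lower line when ub > -lb, and as the upper line the secant
+          y = ub/(ub - lb) * (x - lb), i.e. slope ub/(ub - lb) and intercept
+          -ub*lb/(ub - lb). The next lines instantiate this with [lb, ub] = [-4, 12].)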
Upper coefficient: 12/(12--4) = 12/16 = 0.75 + Second ReLU is active, bounds surive the activation + + x4 range: [-4, 12] + x2 <= x4 <= 0.75 x2 + 3 + x4.lb = 2x0 + 3x1 - 15 + x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25 + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + ==> x2 - x3 <= x6 <= 0.75x2 - x3 + 3 + x6.lb = x0 + 2x1 - 15 + x6.ub = 0.5x0 + 1.25x1 - 8.25 + + x6 range: [4 + 2 - 15 = -9, 3 + 6.25 - 8.25 = 1] = [-9, 1] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, -4, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -9, Tightening::LB ), + Tightening( 6, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (RELU): + x2 <= x4 <= 0.75x2 + 3 + x3 <= x5 <= x3 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x6 <= x6 <= x6 + + Layer 2: + Using x6 = x5 - x4: + x4 - x5 <= x6 <= x4 - x5 + + Layer 1: + Using x2 <= x4 <= x2, x3 <= x5 <= x3: + x2 - x3 <= x6 <= 0.75x2 - x3 + 3 + + Layer 0: + Using x2 = 2x0 + 3x1, x3 = x0 + x1: + x0 + 2x1 - 15 <= x6 <= 0.5x0 + 1.25x1 - 8.25 + */ + comparePredecessorLayerSymbolicBounds( nlr, + 2, + Vector( { 1, 0, 0, 1 } ), + Vector( { 0.75, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 3, 0 } ) ); + + compareOutputLayerSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 1, + Vector( { 1, -1 } ), + Vector( { 0.75, -1 } ), + Vector( { 0 } ), + Vector( { 3 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 0, + Vector( { 1, 2 } ), + Vector( { 0.5, 1.25 } ), + Vector( { -15 } ), + Vector( { -8.25 } ) ); + } + + void test_symbolic_bound_maps_relus_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
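+        // (The elimination applied below takes precedence over this bias: pinning x2
+        // to the constant -3 fixes the first ReLU's phase to inactive.)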
+ nlr.setBias( 1, 0, -15 ); + + // However, one of the ReLU's variables has been eliminated + nlr.eliminateVariable( 2, -3 ); + + // Invoke initializeSymbolicBoundsMaps + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = -3 + x2 is eliminated, everything set to -3 + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is inactive (set externally), bounds get zeroed + Second ReLU is active, bounds surive the activation + + 0 <= x4 <= 0 + x4.lb = 0 + x4.ub = 0 + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + ==> -x3 <= x6 <= -x3 + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 : [-11, -5] + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (RELU): + 0 <= x4 <= 0 + x3 <= x5 <= x3 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x6 <= x6 <= x6 + + Layer 2: + Using x6 = x5 - x4: + x4 - x5 <= x6 <= x4 - x5 + + Layer 1: + Using x2 <= x4 <= x2, x3 <= x5 <= x3: + -x3 <= x6 <= -x3 + + Layer 0: + Using x3 = x0 + x1: + -x0 - x1 <= x6 <= -x0 - x1 + */ + comparePredecessorLayerSymbolicBounds( nlr, + 2, + Vector( { 0, 0, 0, 1 } ), + Vector( { 0, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + compareOutputLayerSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 1, + Vector( { 0, -1 } ), + Vector( { 0, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 0, + Vector( { -1, -1 } ), + Vector( { -1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + } + + void test_symbolic_bound_maps_relu_residual1() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTReluResidual1( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + + // Invoke initializeSymbolicBoundsMaps + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + + /* + Input ranges: + + x0: [-1, 1] + + Layers 1. 2: + + x1 = x0 + x1.lb = x0 : [-1, 1] + x1.ub = x0 : [-1, 1] + + ReLU is undecided, bound is concretized. 1 = ub <= -lb = 1, using ReLU lower + coefficient of 0. 
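+          (Here ub <= -lb, so the zero function is the tighter lower line; the upper
+          line is again ub/(ub - lb) * (x - lb), with intercept -ub*lb/(ub - lb) = 0.5.)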
Upper coefficient: 1/( 1--1 ) = 1/2 = 0.5 + + 0 <= x2 <= 0.5x1 + 0.5 + x2.lb = 0 + x2.ub = 0.5x0 + 0.5 + x2 range: [0, 1] + + Layers 3, 4 (with residual from x0): + + x3 = - x2 - x0 + 1 + x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 2] + x3.ub = -1( 0 ) -1x0 + 1 = -x0 + 1 : [0, 2] + x3 range: [-1, 2] + + ReLU is undecided, bound is concretized. 2 = ub > -lb = 1, using ReLU lower + coefficient of 1. Upper coefficient: 2/( 2--1 ) = 2/3. + + x3 <= x4 <= 2/3 x3 + 2/3 + x4.lb = -1.5x0 + 0.5 + x4.ub = 2/3 ( -x0 + 1 ) + 2/3 = -2/3 x0 + 4/3 : [1, 2] + x4 range: [-1, 2] + + Layer 5 (with residual from x1): + + x5 = 3x4 + 3x1 + 1 + x5.lb = 3 ( -1.5x0 + 0.5 ) + 3 ( x0 ) + 1 = -1.5x0 + 2.5 : [1, 4] + x5.ub = 3 ( -2/3 x0 + 4/3 ) + 3 ( x0 ) + 1 = x0 + 5 : [4, 6] + x5 range: [1, 6] + */ + + List expectedBounds( { + Tightening( 1, -1, Tightening::LB ), + Tightening( 1, 1, Tightening::UB ), + Tightening( 2, 0, Tightening::LB ), + Tightening( 2, 1, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), + Tightening( 5, 6, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (RELU): + 0 <= x2 <= 0.5x1 + 0.5 + + Layer 4 (RELU): + x3 <= x4 <= 2/3 x3 + 2/3 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 5: + x5 <= x5 <= x5 + + Layer 4: + Using x5 = 3x4 + 3x1 + 1: + 3x4 + 3x1 + 1 <= x5 <= 3x4 + 3x1 + 1 + Concretizing residual using x1 : [-1, 1]: 3x4 - 2 <= x5 <= 3x4 + 4 + + Layer 3: + Using x3 <= x4 <= 2/3 x3 + 2/3: + 3x3 + 3x1 + 1 <= x5 <= 2x3 + 3x1 + 3 + Concretizing residual using x1 : [-1, 1]: 3x3 - 2 <= x5 <= 2x3 + 6 + + Layer 2: + Using x3 = -x2 - x0 + 1: + -3x2 + 3x1 - 3x0 + 4 <= x5 <= -2x2 + 3x1 - 2x0 + 5 + Concretizing residual using x0 : [-1, 1], x1 : [-1, 1]: -3x2 - 2 <= x5 <= -2x2 + 10 + + Layer 1: + Using 0 <= x2 <= 0.5x1 + 0.5: + 1.5x1 - 3x0 + 2.5 <= x5 <= 3x1 - 2x0 + 5 + Concretizing residual using x0 : [-1, 1]: 1.5x1 - 0.5 <= x5 <= 3x1 + 7 + + Layer 0: + Using x1 = x0: + -1.5x0 + 2.5 <= x5 <= x0 + 5 + */ + comparePredecessorLayerSymbolicBounds( nlr, + 2, + Vector( { 0 } ), + Vector( { 0.5 } ), + Vector( { 0 } ), + Vector( { 0.5 } ) ); + comparePredecessorLayerSymbolicBounds( nlr, + 4, + Vector( { 1 } ), + Vector( { 0.6667 } ), + Vector( { 0 } ), + Vector( { 0.6667 } ) ); + + compareOutputLayerSymbolicBounds( nlr, + 5, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 4, + Vector( { 3 } ), + Vector( { 3 } ), + Vector( { -2 } ), + Vector( { 4 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 3, + Vector( { 3 } ), + Vector( { 2 } ), + Vector( { -2 } ), + Vector( { 6 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 2, + Vector( { -3 } ), + Vector( { -2 } ), + Vector( { -2 } ), + Vector( { 10 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 1, + Vector( { 1.5 } ), + Vector( { 3 } ), + Vector( { -0.5 } ), + Vector( { 7 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 0, + Vector( { -1.5 } ), + Vector( { 1 } ), + Vector( { 2.5 } ), + Vector( { 5 } ) ); + } + + void test_symbolic_bound_maps_relu_residual2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + 
MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTReluResidual2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + + // Invoke initializeSymbolicBoundsMaps + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + + /* + Input ranges: + + x0: [-1, 1] + + Layers 1, 2: + + x1 = x0 + x1.lb = x0 : [-1, 1] + x1.ub = x0 : [-1, 1] + + ReLU is undecided, bound is concretized. 1 = ub <= -lb = 1, using ReLU lower + coefficient of 0. Upper coefficient: 1/( 1--1 ) = 1/2 = 0.5 + + 0.5 x1 <= x2 <= 0.5x1 + 0.5 + x2.lb = 0 + x2.ub = 0.5x0 + 0.5 + x2 range: [0, 1] + + Layers 3, 4 (with residual from x0): + + x3 = - x2 - x0 + 1 + x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 2] + x3.ub = -1( 0 ) -1x0 + 1 = -x0 + 1 : [0, 2] + x3 range: [-1, 2] + + ReLU is undecided, bound is concretized. 2 = ub > -lb = 1, using ReLU lower + coefficient of 1. Upper coefficient: 2/( 2--1 ) = 2/3. + + x3 <= x4 <= 2/3 x3 + 2/3 + x4.lb = -1.5x0 + 0.5 + x4.ub = 2/3 ( -x0 + 1 ) + 2/3 = -2/3 x0 + 4/3 : [1, 2] + x4 range: [-1, 2] + + Layer 5 (with residual from x0): + + x5 = 3x4 + x0 + 1 + x5.lb = 3 ( -1.5x0 + 0.5 ) + 1 ( x0 ) + 1 = -3.5x0 + 2.5 : [-1, 6] + x5.ub = 3 ( -2/3 x0 + 4/3 ) + 1 ( x0 ) + 1 = -x0 + 5 : [4, 6] + x5 range: [-1, 6] + + Layer 6: + x6 = x5 + x6.lb = -3.5x0 + 2.5 : [-1, 6] + x6.ub = -x0 + 5 : [4, 6] + x6 range: [-1, 6] + */ + + List expectedBounds( { + Tightening( 1, -1, Tightening::LB ), + Tightening( 1, 1, Tightening::UB ), + Tightening( 2, 0, Tightening::LB ), + Tightening( 2, 1, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), + Tightening( 5, 6, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), + Tightening( 6, 6, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (RELU): + 0 <= x2 <= 0.5x1 + 0.5 + + Layer 4 (RELU): + x3 <= x4 <= 2/3 x3 + 2/3 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 6: + x6 <= x6 <= x6 + + Layer 5: + Using x6 = x5: + x5 <= x6 <= x5 + + Layer 4: + Using x5 = 3x4 + x0 + 1: + 3x4 + x0 + 1 <= x6 <= 3x4 + x0 + 1 + Concretizing residual using x0 : [-1, 1]: 3x4 <= x6 <= 3x4 + 2 + + Layer 3: + Using x3 <= x4 <= 2/3 x3 + 2/3: + 3x3 + x0 + 1 <= x6 <= 2x3 + x0 + 3 + Concretizing residual using x0 : [-1, 1]: 3x3 <= x6 <= 2x3 + 4 + + Layer 2: + Using x3 = -x2 - x0 + 1: + -3x2 - 2x0 + 4 <= x6 <= -2x2 - x0 + 5 + Concretizing residual using x0 : [-1, 1]: -3x2 + 2 <= x6 <= -2x2 + 6 + + Layer 1: + Using 0 <= x2 <= 0.5x1 + 0.5: + -1.5x1 - 2x0 + 2.5 <= x6 <= -x0 + 5 + Concretizing residual using x0 : [-1, 1]: -1.5x1 + 0.5 <= x6 <= 6 + + Layer 0: + Using x1 = x0: + -3.5x0 + 2.5 <= x6 <= -x0 + 5 + */ + comparePredecessorLayerSymbolicBounds( nlr, + 2, + Vector( { 0 } ), + Vector( { 0.5 } ), + Vector( { 0 } ), + Vector( { 0.5 } ) ); + comparePredecessorLayerSymbolicBounds( nlr, + 4, + Vector( { 1 } ), + Vector( { 0.6667 } ), + Vector( { 0 } ), + Vector( { 0.6667 } ) ); + + compareOutputLayerSymbolicBounds( nlr, + 6, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 5, + Vector( { 1 } ), + Vector( { 1 
} ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 4, + Vector( { 3 } ), + Vector( { 3 } ), + Vector( { 0 } ), + Vector( { 2 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 3, + Vector( { 3 } ), + Vector( { 2 } ), + Vector( { 0 } ), + Vector( { 4 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 2, + Vector( { -3 } ), + Vector( { -2 } ), + Vector( { 2 } ), + Vector( { 6 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 1, + Vector( { -1.5 } ), + Vector( { 0 } ), + Vector( { 0.5 } ), + Vector( { 6 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 0, + Vector( { -3.5 } ), + Vector( { -1 } ), + Vector( { 2.5 } ), + Vector( { 5 } ) ); + } + + void test_symbolic_bound_maps_relu_reindex() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTReluReindex( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke initializeSymbolicBoundsMaps + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + + /* + Input ranges: + + x0: [-1, 1] + x1: [-1, 1] + + Layers 1, 2: + + x2 = x0 + x1 + x2.lb = x0 + x1 : [-2, 2] + x2.ub = x0 + x1 : [-2, 2] + + x3 = x0 - x1 + x3.lb = x0 - x1 : [-2, 2] + x3.ub = x0 - x1 : [-2, 2] + + Both ReLUs are undecided, bounds are concretized. 2 = ub <= -lb = 2, using ReLU lower + coefficient of 0. Upper coefficient: 2/( 2--2 ) = 2/4 = 0.5 + + 0 <= x4 <= 0.5x2 + 1 + x4.lb = 0 + x4.ub = 0.5 ( x0 + x1 ) + 1 = 0.5x0 + 0.5x1 + 1 + x4 range: [0, 2] + + 0 <= x5 <= 0.5x3 + 1 + x5.lb = 0 + x5.ub = 0.5 ( x0 - x1 ) + 1 = 0.5x0 - 0.5x1 + 1 + x5 range: [0, 2] + + Layers 3, 4: + + x6 = x4 + x5 + x6.lb = 1 ( 0 ) + 1 ( 0 ) = 0 : [0, 0] + x6.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) + 1 ( 0.5x0 - 0.5x1 + 1 ) = x0 + 2 : [1, 3] + x6 range: [0, 3] + + x7 = x4 - x5 + x7.lb = 1 ( 0 ) - 1 ( 0.5x0 - 0.5x1 + 1 ) = - 0.5x0 + 0.5x1 - 1 : [-2, 0] + x7.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) - 1 ( 0 ) = 0.5x0 + 0.5x1 + 1 : [0, 2] + x7 range: [-2, 2] + + First ReLU is active, bounds surive the activation + Second ReLUs is undecided, bound is concretized. 2 = ub <= -lb = 2, using ReLU lower + coefficient of 0. 
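+          (Same rule: since ub <= -lb the lower line is y = 0, and the upper secant
+          0.5 * (x7 + 2) = 0.5x7 + 1 reappears in the x9 bounds below.)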
Upper coefficient (second ReLU): 2/( 2--2 ) = 2/4 = 0.5 + + x6 <= x8 <= x6 + x8.lb = 0 + x8.ub = x0 + 2 + x8 range: [0, 3] + + 0 <= x9 <= 0.5 x7 + 1 + x9.lb = 0 + x9.ub = 0.5 ( 0.5x0 + 0.5x1 + 1 ) + 1 = 0.25x0 + 0.25x1 + 1.5 + x9 range: [0, 2] + + Layer 5: + x10 = x8 + x9 + 1 + x10.lb = 1 ( 0 ) + 1 ( 0 ) + 1 = 1 : [1, 1] + x10.ub = 1 ( x6 ) + 1 ( 0.5 x7 + 1 ) + 1 = 1 ( x4 + x5 ) + 1 ( 0.5 x4 - 0.5x5 + 1 ) + 1 + = 1.5x4 + 0.5x5 + 2 <= 0.75x2 + 0.25x3 + 4 = x0 + 0.5x1 + 4 : [2.5, 5.5] + x10 range: [1, 5.5] + + x11 = x9 + x11.lb = 0 + x11.ub = 0.25x0 + 0.25x1 + 1.5 + x11 range: [0, 2] + + */ + + List expectedBounds( + { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, 1, Tightening::LB ), Tightening( 10, 5.5, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 2, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (RELU): + 0 <= x4 <= 0.5x2 + 1 + 0 <= x5 <= 0.5x3 + 1 + + Layer 4 (RELU): + x6 <= x8 <= x6 + 0 <= x9 <= 0.5 x7 + 1 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 5: + x10 <= x10 <= x10 + x11 <= x11 <= x11 + + Layer 4: + Using x10 = x8 + x9 + 1, x11 = x9: + x8 + x9 + 1 <= x10 <= x8 + x9 + 1 + x9 <= x11 <= x9 + + Layer 3: + Using x6 <= x8 <= x6, 0 <= x9 <= 0.5 x7 + 1: + x6 + 1 <= x10 <= x6 + 0.5 x7 + 2 + 0 <= x11 <= 0.5 x7 + 1 + + Layer 2: + Using x6 = x4 + x5, x7 = x4 - x5: + x4 + x5 + 1 <= x10 <= 1.5x4 + 0.5x5 + 2 + 0 <= x11 <= 0.5x4 - 0.5x5 + 1 + + Layer 1: + Using 0 <= x4 <= 0.5x2 + 1, 0 <= x5 <= 0.5x3 + 1: + 1 <= x10 <= 0.75x2 + 0.25x3 + 4 + 0 <= x11 <= 0.25x2 + 1.5 + + Layer 0: + Using x2 = x0 + x1, x3 = x0 - x1: + 1 <= x10 <= x0 + 0.5x1 + 4 + 0 <= x11 <= 0.25x2 + 0.25x3 + 1.5 + */ + comparePredecessorLayerSymbolicBounds( nlr, + 2, + Vector( { 0, 0, 0, 0 } ), + Vector( { 0, 0.5, 0.5, 0 } ), + Vector( { 0, 0 } ), + Vector( { 1, 1 } ) ); + + comparePredecessorLayerSymbolicBounds( nlr, + 4, + Vector( { 0, 1, 0, 0 } ), + Vector( { 0, 1, 0.5, 0 } ), + Vector( { 0, 0 } ), + Vector( { 1, 0 } ) ); + + compareOutputLayerSymbolicBounds( nlr, + 5, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 4, + Vector( { 1, 1, 1, 0 } ), + Vector( { 1, 1, 1, 0 } ), + Vector( { 1, 0 } ), + Vector( { 1, 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 3, + Vector( { 1, 0, 0, 0 } ), + Vector( { 1, 0, 0.5, 0.5 } ), + Vector( { 1, 0 } ), + Vector( { 2, 1 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 2, + Vector( { 1, 0, 1, 0 } ), + Vector( { 0.5, -0.5, 1.5, 0.5 } ), + Vector( { 1, 0 } ), + Vector( { 2, 1 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 1, + Vector( { 0, 0, 0, 0 } ), + Vector( { 0.75, 0.25, 0.25, 0 } ), + Vector( { 1, 0 } ), + Vector( { 4, 1.5 } ) ); + 
compareOutputLayerSymbolicBounds( nlr,
+                                          0,
+                                          Vector<double>( { 0, 0, 0, 0 } ),
+                                          Vector<double>( { 1, 0.25, 0.5, 0.25 } ),
+                                          Vector<double>( { 1, 0 } ),
+                                          Vector<double>( { 4, 1.5 } ) );
+    }
+
+    void test_symbolic_bound_maps_abs_all_positive()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        tableau.getBoundManager().initialize( 7 );
+        nlr.setTableau( &tableau );
+
+        // Create the layers
+        nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
+        nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 );
+        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );
+
+        // Mark layer dependencies
+        for ( unsigned i = 1; i <= 3; ++i )
+            nlr.addLayerDependency( i - 1, i );
+
+        // Weights
+        nlr.setWeight( 0, 0, 1, 0, 2 );
+        nlr.setWeight( 0, 0, 1, 1, 1 );
+        nlr.setWeight( 0, 1, 1, 0, 3 );
+        nlr.setWeight( 0, 1, 1, 1, 1 );
+        nlr.setWeight( 2, 0, 3, 0, 1 );
+        nlr.setWeight( 2, 1, 3, 0, -1 );
+
+        // Mark the Abs sources
+        nlr.addActivationSource( 1, 0, 2, 0 );
+        nlr.addActivationSource( 1, 1, 2, 1 );
+
+        // Variable indexing
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
+
+        // Very loose bounds for neurons except inputs
+        double large = 1000000;
+
+        tableau.setLowerBound( 2, -large );
+        tableau.setUpperBound( 2, large );
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+
+        tableau.setLowerBound( 0, 4 );
+        tableau.setUpperBound( 0, 6 );
+        tableau.setLowerBound( 1, 1 );
+        tableau.setUpperBound( 1, 5 );
+
+        // Invoke initializeSymbolicBoundsMaps
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() );
+
+        /*
+          Input ranges:
+
+          x0: [4, 6]
+          x1: [1, 5]
+
+          Layers 1, 2:
+
+          x2 = 2x0 + 3x1
+          x2.lb = 2x0 + 3x1 : [11, 27]
+          x2.ub = 2x0 + 3x1 : [11, 27]
+
+          x3 = x0 + x1
+          x3.lb = x0 + x1 : [5, 11]
+          x3.ub = x0 + x1 : [5, 11]
+
+          Both absolute values positive, bounds survive through the activations:
+
+          x2 <= x4 <= x2
+          x4.lb = 2x0 + 3x1 : [11, 27]
+          x4.ub = 2x0 + 3x1 : [11, 27]
+
+          x3 <= x5 <= x3
+          x5.lb = x0 + x1 : [5, 11]
+          x5.ub = x0 + x1 : [5, 11]
+
+          Layer 3:
+          x6 = x4 - x5
+          => x2 - x3 <= x6 <= x2 - x3
+          x6.lb = x0 + 2x1 : [6, 16]
+          x6.ub = x0 + 2x1 : [6, 16]
+        */
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, 11, Tightening::LB ),
+            Tightening( 2, 27, Tightening::UB ),
+            Tightening( 3, 5, Tightening::LB ),
+            Tightening( 3, 11, Tightening::UB ),
+
+            Tightening( 4, 11, Tightening::LB ),
+            Tightening( 4, 27, Tightening::UB ),
+            Tightening( 5, 5, Tightening::LB ),
+            Tightening( 5, 11, Tightening::UB ),
+
+            Tightening( 6, 6, Tightening::LB ),
+            Tightening( 6, 16, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+        /*
+          Symbolic bounds of every activation layer in terms of predecessor:
+
+          Layer 2 (ABSOLUTE_VALUE):
+          x2 <= x4 <= x2
+          x3 <= x5 <= x3
+
+          Symbolic
bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x6 <= x6 <= x6 + + Layer 2: + Using x6 = x5 - x4: + x4 - x5 <= x6 <= x4 - x5 + + Layer 1: + Using x2 <= x4 <= x2, x3 <= x5 <= x3: + x2 - x3 <= x6 <= x2 - x3 + + Layer 0: + Using x2 = 2x0 + 3x1, x3 = x0 + x1: + x0 + 2x1 <= x6 <= x0 + 2x1 + */ + comparePredecessorLayerSymbolicBounds( nlr, + 2, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + compareOutputLayerSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 1, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 0, + Vector( { 1, 2 } ), + Vector( { 1, 2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + } + + void test_symbolic_bound_maps_abs_positive_and_negative() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); + + // Invoke initializeSymbolicBoundsMaps + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + x2 = 2x0 + 3x1 - 30 + x2.lb = 2x0 + 3x1 - 30 : [-19, -3] + x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First absolute value is negative, bounds get flipped + Second absolute value is positive, bounds surive the 
activation
+
+          -x2 <= x4 <= -x2
+          x4.lb = -2x0 - 3x1 + 30 : [3, 19]
+          x4.ub = -2x0 - 3x1 + 30 : [3, 19]
+
+          x3 <= x5 <= x3
+          x5.lb = x0 + x1 : [5, 11]
+          x5.ub = x0 + x1 : [5, 11]
+
+          Layer 3:
+          x6 = x4 - x5
+          => -x2 - x3 <= x6 <= -x2 - x3
+          x6.lb = -3x0 - 4x1 + 30 : [-8, 14]
+          x6.ub = -3x0 - 4x1 + 30 : [-8, 14]
+        */
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, -19, Tightening::LB ),
+            Tightening( 2, -3, Tightening::UB ),
+            Tightening( 3, 5, Tightening::LB ),
+            Tightening( 3, 11, Tightening::UB ),
+
+            Tightening( 4, 3, Tightening::LB ),
+            Tightening( 4, 19, Tightening::UB ),
+            Tightening( 5, 5, Tightening::LB ),
+            Tightening( 5, 11, Tightening::UB ),
+
+            Tightening( 6, -8, Tightening::LB ),
+            Tightening( 6, 14, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+        /*
+          Symbolic bounds of every activation layer in terms of predecessor:
+
+          Layer 2 (ABSOLUTE_VALUE):
+          -x2 <= x4 <= -x2
+          x3 <= x5 <= x3
+
+          Symbolic bounds of output layer in terms of every layer (backsubstitution):
+
+          Layer 3:
+          x6 <= x6 <= x6
+
+          Layer 2:
+          Using x6 = x4 - x5:
+          x4 - x5 <= x6 <= x4 - x5
+
+          Layer 1:
+          Using -x2 <= x4 <= -x2, x3 <= x5 <= x3:
+          -x2 - x3 <= x6 <= -x2 - x3
+
+          Layer 0:
+          Using x2 = 2x0 + 3x1 - 30, x3 = x0 + x1:
+          -3x0 - 4x1 + 30 <= x6 <= -3x0 - 4x1 + 30
+        */
+        comparePredecessorLayerSymbolicBounds( nlr,
+                                               2,
+                                               Vector<double>( { -1, 0, 0, 1 } ),
+                                               Vector<double>( { -1, 0, 0, 1 } ),
+                                               Vector<double>( { 0, 0 } ),
+                                               Vector<double>( { 0, 0 } ) );
+
+        compareOutputLayerSymbolicBounds( nlr,
+                                          3,
+                                          Vector<double>( { 1 } ),
+                                          Vector<double>( { 1 } ),
+                                          Vector<double>( { 0 } ),
+                                          Vector<double>( { 0 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          2,
+                                          Vector<double>( { 1, -1 } ),
+                                          Vector<double>( { 1, -1 } ),
+                                          Vector<double>( { 0 } ),
+                                          Vector<double>( { 0 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          1,
+                                          Vector<double>( { -1, -1 } ),
+                                          Vector<double>( { -1, -1 } ),
+                                          Vector<double>( { 0 } ),
+                                          Vector<double>( { 0 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          0,
+                                          Vector<double>( { -3, -4 } ),
+                                          Vector<double>( { -3, -4 } ),
+                                          Vector<double>( { 30 } ),
+                                          Vector<double>( { 30 } ) );
+    }
+
+    void test_symbolic_bound_maps_absolute_values_positive_and_not_fixed()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        tableau.getBoundManager().initialize( 7 );
+        nlr.setTableau( &tableau );
+
+        // Create the layers
+        nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
+        nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 );
+        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );
+
+        // Mark layer dependencies
+        for ( unsigned i = 1; i <= 3; ++i )
+            nlr.addLayerDependency( i - 1, i );
+
+        // Weights
+        nlr.setWeight( 0, 0, 1, 0, 2 );
+        nlr.setWeight( 0, 0, 1, 1, 1 );
+        nlr.setWeight( 0, 1, 1, 0, 3 );
+        nlr.setWeight( 0, 1, 1, 1, 1 );
+        nlr.setWeight( 2, 0, 3, 0, 1 );
+        nlr.setWeight( 2, 1, 3, 0, -1 );
+
+        // Mark the Abs sources
+        nlr.addActivationSource( 1, 0, 2, 0 );
+        nlr.addActivationSource( 1, 1, 2, 1 );
+
+        // Variable indexing
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
+
+        // Very loose bounds for neurons except inputs
+        double large = 1000000;
+
+        tableau.setLowerBound( 2,
-large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + // Invoke initializeSymbolicBoundsMaps + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + x2 = 2x0 + 3x1 - 15 + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First absolute value is undecided, bounds are concretized. + Second absolute value is active, bounds surive the activation + + 0 <= x4 <= 12 + x4 range: [0, 12] + x4.lb = 0 + x4.ub = 12 + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + => -x3 <= x6 <= -x3 + 12 + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 + 12 : [ 1, 7] + + x6 range: [-11, 7] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, 7, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (ABSOLUTE_VALUE): + 0 <= x4 <= 12 + x3 <= x5 <= x3 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x6 <= x6 <= x6 + + Layer 2: + Using x6 = x5 - x4: + x4 - x5 <= x6 <= x4 - x5 + + Layer 1: + Using 0 <= x4 <= 12, x3 <= x5 <= x3: + -x3 <= x6 <= -x3 + 12 + + Layer 0: + Using x3 = x0 + x1: + -x0 - x1 <= x6 <= -x0 - x1 + 12 + */ + comparePredecessorLayerSymbolicBounds( nlr, + 2, + Vector( { 0, 0, 0, 1 } ), + Vector( { 0, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 12, 0 } ) ); + + compareOutputLayerSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 1, + Vector( { 0, -1 } ), + Vector( { 0, -1 } ), + Vector( { 0 } ), + Vector( { 12 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 0, + Vector( { -1, -1 } ), + Vector( { -1, -1 } ), + Vector( { 0 } ), + Vector( { 12 } ) ); + } + + void test_symbolic_bound_maps_absolute_values_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 
2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. + nlr.setBias( 1, 0, -15 ); + + // However, the weighted sum variable has been eliminated + nlr.eliminateVariable( 2, -3 ); + + // Invoke initializeSymbolicBoundsMaps + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = -3 + x2 is eliminated, everything set to -3 + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First absolute value is negative, bounds get flipped + Second absolute value is positive, bounds surive the activation + + -x2 <= x4 <= -x2 + x4: all set to 3 + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + => -x2 - x3 <= x6 <= -x2 - x3 + => -x3 + 3 <= x6 <= -x3 + 3 + x6.lb = - x0 - x1 + 3 : [-8, -2] + x6.ub = - x0 - x1 + 3 : [-8, -2] + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, -2, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (ABSOLUTE_VALUE): + -x2 <= x4 <= -x2 + x3 <= x5 <= x3 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x6 <= x6 <= x6 + + Layer 2: + Using x6 = x5 - x4: + x4 - x5 <= x6 <= x4 - x5 + + Layer 1: + Using -x2 <= x4 <= -x2, x3 <= x5 <= x3: + -x2 - x3 <= x6 <= -x2 - x3 + x2 = -3 is eliminated. 
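+          Substituting the eliminated constant x2 = -3 (so x4 = |x2| = 3) gives: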
+ -x3 + 3 <= x6 <= -x3 + 3 + + Layer 0: + Using x3 = x0 + x1: + - x0 - x1 + 3 <= x6 <= - x0 - x1 + 3 + */ + comparePredecessorLayerSymbolicBounds( nlr, + 2, + Vector( { -1, 0, 0, 1 } ), + Vector( { -1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + compareOutputLayerSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 1, + Vector( { 0, -1 } ), + Vector( { 0, -1 } ), + Vector( { 3 } ), + Vector( { 3 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 0, + Vector( { -1, -1 } ), + Vector( { -1, -1 } ), + Vector( { 3 } ), + Vector( { 3 } ) ); + } + + void test_symbolic_bound_maps_signs_positive_and_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + // Invoke initializeSymbolicBoundsMaps + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = 2x0 + 3x1 - 15 + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First sign is undecided, bounds are concretized. + Second sign is active, bounds become constant 1 + Coefficient (first Sign, lower): 2/12 = 1/6. + Coefficient (first Sign, upper): -2/-4 = 1/2. 
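+          (The lower line 1/6 x - 1 joins (0, -1) to (12, 1); the upper line
+          1/2 x + 1 joins (-4, -1) to (0, 1). Both are valid for Sign over [-4, 12].)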
+ + 1/6 x2 - 1 <= x4 <= 1/2 x2 + 1 + x4.lb = 1/6 ( 2x0 + 3x1 - 15 ) - 1 = 2/6 x0 + 3/6 x1 - 21/6 + x4.ub = 1/2 ( 2x0 + 3x1 - 15 ) + 1 = x0 + 1.5x1 - 6.5 + x4 range: [-1, 1] + + 1 <= x5 <= 1 + x5.lb = 1 + x5.ub = 1 + x5 range: [1, 1] + + Layer 3: + + x6 = x4 - x5 : [-2, 0] + => 1/6 x2 - 2 <= x6 <= 1/2 x2 : [-8/3, 6] + x6.lb = 1 ( 2/6 x0 + 3/6 x1 - 21/6 ) - 1 ( 1 ) = 1/3 x0 + 1/2 x1 - 4.5 : [-16/6, 0] + x6.ub = 1 ( x0 + 1.5x1 - 6.5 ) - 1 ( 1 ) = x0 + 1.5x1 - 7.5 : [-2, 6] + + x6 range: [-2, 0] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), + Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), + Tightening( 6, 0, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (SIGN): + 1/6 x2 - 1 <= x4 <= 1/2 x2 + 1 + 1 <= x5 <= 1 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x6 <= x6 <= x6 + + Layer 2: + Using x6 = x5 - x4: + x4 - x5 <= x6 <= x4 - x5 + + Layer 1: + Using 1/6 x2 - 1 <= x4 <= 1/2 x2 + 1, 1 <= x5 <= 1: + 1/6 x2 - 2 <= x6 <= 1/2 x2 + + Layer 0: + Using x2 = 2x0 + 3x1 - 15: + 1/3 x0 + 1/2 x1 - 4.5 <= x6 <= x0 + 1.5x1 - 7.5 + */ + comparePredecessorLayerSymbolicBounds( nlr, + 2, + Vector( { 0.1667, 0, 0, 0 } ), + Vector( { 0.5, 0, 0, 0 } ), + Vector( { -1, 1 } ), + Vector( { 1, 1 } ) ); + + compareOutputLayerSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 1, + Vector( { 0.1667, 0 } ), + Vector( { 0.5, 0 } ), + Vector( { -2 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 0, + Vector( { 0.3333, 0.5 } ), + Vector( { 1, 1.5 } ), + Vector( { -4.5 } ), + Vector( { -7.5 } ) ); + } + + void test_symbolic_bound_maps_signs_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( 
NLR::NeuronIndex( 2, 1 ), 5 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
+
+        // Very loose bounds for neurons except inputs
+        double large = 1000000;
+
+        tableau.setLowerBound( 2, -large );
+        tableau.setUpperBound( 2, large );
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+
+        tableau.setLowerBound( 0, 4 );
+        tableau.setUpperBound( 0, 6 );
+        tableau.setLowerBound( 1, 1 );
+        tableau.setUpperBound( 1, 5 );
+
+        // Strong negative bias for x2, which is node (1,0). Should make the node unfixed.
+        nlr.setBias( 1, 0, -15 );
+
+        // However, the weighted sum variable has been eliminated
+        nlr.eliminateVariable( 2, -3 );
+
+        // Invoke initializeSymbolicBoundsMaps
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() );
+
+        /*
+          Input ranges:
+
+          x0: [4, 6]
+          x1: [1, 5]
+
+          Layers 1, 2:
+
+          x2 = -3
+          x2 is eliminated, everything set to -3
+
+          x3 = x0 + x1
+          x3.lb = x0 + x1 : [5, 11]
+          x3.ub = x0 + x1 : [5, 11]
+
+          First sign is negative, bounds become constant -1
+          Second sign is positive, bounds become constant 1
+
+          -1 <= x4 <= -1
+          x4: all set to -1
+
+          1 <= x5 <= 1
+          x5: all set to 1
+
+          Layer 3:
+
+          x6 = x4 - x5
+          x6.lb = 1 ( -1 ) - 1 ( 1 ) = -2
+          x6.ub = 1 ( -1 ) - 1 ( 1 ) = -2
+        */
+
+        List<Tightening> expectedBounds( {
+            // x2 does not appear, because it has been eliminated
+
+            Tightening( 3, 5, Tightening::LB ),
+            Tightening( 3, 11, Tightening::UB ),
+
+            Tightening( 4, -1, Tightening::LB ),
+            Tightening( 4, -1, Tightening::UB ),
+            Tightening( 5, 1, Tightening::LB ),
+            Tightening( 5, 1, Tightening::UB ),
+
+            Tightening( 6, -2, Tightening::LB ),
+            Tightening( 6, -2, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+        /*
+          Symbolic bounds of every activation layer in terms of predecessor:
+
+          Layer 2 (SIGN):
+          -1 <= x4 <= -1
+          1 <= x5 <= 1
+
+          Symbolic bounds of output layer in terms of every layer (backsubstitution):
+
+          Layer 3:
+          x6 <= x6 <= x6
+
+          Layer 2:
+          Using x6 = x4 - x5:
+          x4 - x5 <= x6 <= x4 - x5
+
+          Layer 1:
+          Using -1 <= x4 <= -1, 1 <= x5 <= 1:
+          -2 <= x6 <= -2
+
+          Layer 0:
+          -2 <= x6 <= -2
+        */
+        comparePredecessorLayerSymbolicBounds( nlr,
+                                               2,
+                                               Vector<double>( { 0, 0, 0, 0 } ),
+                                               Vector<double>( { 0, 0, 0, 0 } ),
+                                               Vector<double>( { -1, 1 } ),
+                                               Vector<double>( { -1, 1 } ) );
+
+        compareOutputLayerSymbolicBounds( nlr,
+                                          3,
+                                          Vector<double>( { 1 } ),
+                                          Vector<double>( { 1 } ),
+                                          Vector<double>( { 0 } ),
+                                          Vector<double>( { 0 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          2,
+                                          Vector<double>( { 1, -1 } ),
+                                          Vector<double>( { 1, -1 } ),
+                                          Vector<double>( { 0 } ),
+                                          Vector<double>( { 0 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          1,
+                                          Vector<double>( { 0, 0 } ),
+                                          Vector<double>( { 0, 0 } ),
+                                          Vector<double>( { -2 } ),
+                                          Vector<double>( { -2 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          0,
+                                          Vector<double>( { 0, 0 } ),
+                                          Vector<double>( { 0, 0 } ),
+                                          Vector<double>( { -2 } ),
+                                          Vector<double>( { -2 } ) );
+    }
+
+    void test_symbolic_bound_maps_leaky_relu()
+    {
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTLeakyReLU( nlr, tableau ); // alpha = 0.2
+
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 1 );
+
+        // Invoke initializeSymbolicBoundsMaps
+
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + + /* + Input ranges: + + x0: [-1, 1] + x1: [-1, 1] + + Layers 1, 2: + + x2 = x0 + x1 + x2.lb = x0 + x1 : [-2, 2] + x2.ub = x0 + x1 : [-2, 2] + + x3 = x0 - x1 + x3.lb = x0 - x1 : [-2, 2] + x3.ub = x0 - x1 : [-2, 2] + + Both LeakyReLUs are undecided, bounds are concretized. + Coefficient: ( 2 - 0.2*-2 )/( 2--2 ) = 2.4/4 = 0.6 + Bias: ( 0.2 - 1 ) * 2 * -2 / ( 2--2 ) = 0.8 + + x2 <= x4 <= 0.6 x2 + 0.8 + x4.lb = x0 + x1 + x4.ub = 0.6 ( x0 + x1 ) + 0.8 = 0.6x0 + 0.6x1 + 0.8 + x4 range: [-2, 2] + + x3 <= x5 <= 0.6 x3 + 0.8 + x5.lb = x0 - x1 + x5.ub = 0.6 ( x0 - x1 ) + 0.8 = 0.6x0 - 0.6x1 + 0.8 + x5 range: [-2, 2] + + Layers 3, 4: + + x6 = x4 + x5 + => x2 + x3 <= x6 <= 0.6 x2 + 0.6 x3 + 1.6 + x6.lb = 1 ( x0 + x1 ) + 1 ( x0 - x1 ) = 2x0 : [-2, 2] + x6.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) + 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 1.2x0 + 1.6 : [0.4, 2.8] + x6 range: [-2, 2.8] + + x7 = x4 - x5 + => x2 - 0.6x3 - 0.8 <= x6 <= 0.6 x2 - x3 + 0.8 + x7.lb = 1 ( x0 + x1 ) - 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2] + x7.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) - 1 ( x0 - x1 ) = -0.4x0 + 1.6x1 + 0.8 : [-1.2, 2.8] + x7 range: [-2.8, 2.8] + + Both LeakyReLUs are undecided, bounds are concretized. + Coefficient (first LeakyReLU): ( 2.8 - 0.2*-2 )/( 2.8--2 ) = 3.2/4.8 = 10/15 = 2/3 + Bias (first LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2 / ( 2.8--2 ) = 14/15 + + Coefficient (second LeakyReLU): ( 2.8 - 0.2*-2.8 )/( 2.8--2.8 ) = 3.36/5.6 = 0.6 + Bias (second LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2.8 / ( 2.8--2.8 ) = 1.12 + + x6 <= x8 <= 10/15 x6 + 14/15 + x8.lb = 2x0 + x8.ub = 10/15 ( 1.2x0 + 1.6 ) + 14/15 = 0.8x0 + 2 + x8 range: [-2, 2.8] + + x7 <= x9 <= 0.6x7 + 1.12 + x9.lb = 0.4x0 + 1.6x1 - 0.8 + x9.ub = 0.6 ( -0.4x0 + 1.6x1 + 0.8 ) + 1.12 = -0.24 x0 + 0.96 x1 + 1.6 + x9 range: [-0.56, 2.8] + + Layer 5: + + x10 = x8 + x9 + 1 + => x6 + x7 + 1 <= x10 <= 2/3 x6 + 0.6 x7 + 229/75 + => 2x4 + 1 <= x10 <= 19/15 x4 + 1/15 x5 + 229/75 + => 2x2 + 1 <= x10 <= 0.76 x2 + 0.04 x3 + 4.12 + x10.lb = 2x0 + 2x1 + 1 : [-3, 5] + x10.ub = 0.8 x0 + 0.72 x1 + 4.12 : [2.6, 5.64] + x10 range: [-3, 5.64] + + x11 = x9 + => x7 <= x11 <= 0.6x7 + 1.12 + => x4 - x5 <= x11 <= 0.6x4 - 0.6x5 + 1.12 + => x2 - 0.6x3 - 0.8 <= x11 <= 0.36 x2 - 0.6 x3 + 1.6 + x11.lb = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2] + x11.ub = -0.24 x0 + 0.96 x1 + 1.6 : [0.4, 2.8] + x11 range: [-2.8, 2.8] + */ + + List expectedBounds( + { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -2, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -2, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2.8, Tightening::UB ), + Tightening( 7, -2.8, Tightening::LB ), Tightening( 7, 2.8, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 2.8, Tightening::UB ), + Tightening( 9, -2.8, Tightening::LB ), Tightening( 9, 2.8, Tightening::UB ), + + Tightening( 10, -3, Tightening::LB ), Tightening( 10, 5.64, Tightening::UB ), + Tightening( 11, -2.8, Tightening::LB ), Tightening( 11, 2.8, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (LEAKY_RELU): + x2 <= x4 <= 0.6 x2 + 0.8 + x3 
<= x5 <= 0.6 x3 + 0.8 + + Layer 4 (LEAKY_RELU): + x6 <= x8 <= 2/3 x6 + 14/15 + x7 <= x9 <= 0.6x7 + 1.12 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 5: + x10 <= x10 <= x10 + x11 <= x11 <= x11 + + Layer 4: + Using x10 = x8 + x9 + 1, x11 = x9: + x8 + x9 + 1 <= x10 <= x8 + x9 + 1 + x9 <= x11 <= x9 + + Layer 3: + Using x6 <= x8 <= 2/3 x6 + 14/15, x7 <= x9 <= 0.6x7 + 1.12: + x6 + x7 + 1 <= x10 <= 2/3 x6 + 0.6 x7 + 229/75 + x7 <= x11 <= 0.6x7 + 1.12 + + Layer 2: + Using x6 = x4 + x5, x7 = x4 - x5: + 2x4 + 1 <= x10 <= 19/15 x4 + 1/15 x5 + 229/75 + x4 - x5 <= x11 <= 0.6x4 - 0.6x5 + 1.12 + + Layer 1: + Using x2 <= x4 <= 0.6 x2 + 0.8, x3 <= x5 <= 0.6 x3 + 0.8: + 2x2 + 1 <= x10 <= 0.76 x2 + 0.04 x3 + 4.12 + x2 - 0.6x3 - 0.8 <= x11 <= 0.36 x2 - 0.6 x3 + 1.6 + + Layer 0: + Using x2 = x0 + x1, x3 = x0 - x1: + 2x0 + 2x1 + 1 <= x10 <= 0.8 x0 + 0.72 x1 + 4.12 + 0.4x0 + 1.6x1 - 0.8 <= x11 <= -0.24 x0 + 0.96 x1 + 1.6 + */ + comparePredecessorLayerSymbolicBounds( nlr, + 2, + Vector( { 1, 0, 0, 1 } ), + Vector( { 0.6, 0, 0, 0.6 } ), + Vector( { 0, 0 } ), + Vector( { 0.8, 0.8 } ) ); + + comparePredecessorLayerSymbolicBounds( nlr, + 4, + Vector( { 1, 0, 0, 1 } ), + Vector( { 0.6667, 0, 0, 0.6 } ), + Vector( { 0, 0 } ), + Vector( { 0.9333, 1.12 } ) ); + + compareOutputLayerSymbolicBounds( nlr, + 5, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 4, + Vector( { 1, 0, 1, 1 } ), + Vector( { 1, 0, 1, 1 } ), + Vector( { 1, 0 } ), + Vector( { 1, 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 3, + Vector( { 1, 0, 1, 1 } ), + Vector( { 0.6667, 0, 0.6, 0.6 } ), + Vector( { 1, 0 } ), + Vector( { 3.0533, 1.12 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 2, + Vector( { 2, 1, 0, -1 } ), + Vector( { 1.2667, 0.6, 0.0667, -0.6 } ), + Vector( { 1, 0 } ), + Vector( { 3.0533, 1.12 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 1, + Vector( { 2, 1, 0, -0.6 } ), + Vector( { 0.76, 0.36, 0.04, -0.6 } ), + Vector( { 1, -0.8 } ), + Vector( { 4.12, 1.6 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 0, + Vector( { 2, 0.4, 2, 1.6 } ), + Vector( { 0.8, -0.24, 0.72, 0.96 } ), + Vector( { 1, -0.8 } ), + Vector( { 4.12, 1.6 } ) ); + } + + void test_symbolic_bound_maps_sigmoids_and_round() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSigmoidsAndRound( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke initializeSymbolicBoundsMaps + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + // Layer 1 + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 0 ), -2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 0 ), 2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 1 ), -2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 1 ), 2, 0.00001 ) ); + + // Layer 2 + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 0 ), 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 0 ), 0.8807, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 1 ), 0.1192, 
0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 1 ), 0.8807, 0.0001 ) ); + + // Layer 3 + /* + Double-check with Python + --- + from math import exp as e + def g(x): + return 1 / (1 + e(-x)) + + def g_prime(x): + return g(x) * (1 - g(x)) + + def lam(l, u): + return (g(u) - g(l)) / (u - l) + + def lam_prime(l, u): + return min(g_prime(l), g_prime(u)) + + l3 = l4 = -2 + u3 = u4 = 2 + l5 = l6 = g(-2) + u5 = u6 = g(2) + lambda7 = lam(l3, u3) + lambda7_prime = lam_prime(l3, u3) + lambda8 = lam(l4, u4) + lambda8_prime = lam_prime(l4, u4) + x7_l = lambda7_prime * (-2) + g(-2) + g(-2) - lambda7_prime * (-2 + -2) + x7_u = lambda7_prime * (2) + g(2) + g(2) -lambda7_prime * (2 + 2) + x8_l = lambda8_prime * (-2) + g(-2) - g(2) - lambda8_prime * (-2 - 2) + x8_u = lambda8_prime * (2) + g(2) - g(-2) -lambda8_prime * (2 - -2) + print(x7_l) + print(x7_u) + print(x8_l) + print(x8_u) + + ''' + Sigmoid linear relaxation ( Layer 2 ): + x4 >= lambda7_prime * x2 + ( g(l3) - lambda7_prime * l3 ) + x4 <= lambda7_prime * x2 + ( g(u3) - lambda7_prime * u3 ) + x5 >= lambda8_prime * x3 + ( g(l4) - lambda8_prime * l4 ) + x5 <= lambda8_prime * x3 + ( g(u4) - lambda7_prime * u4 ) + ''' + print('------------------') + print(lambda7_prime) + print(lambda8_prime) + print(g(l3) - lambda7_prime * l3) + print(g(u3) - lambda7_prime * u3) + print(g(l4) - lambda8_prime * l4) + print(g(u4) - lambda8_prime * u4) + + + --- + [output]: + 0.4483930148512481 + 1.5516069851487517 + -0.5516069851487517 + 0.5516069851487517 + ------------------ + 0.1049935854035065 + 0.1049935854035065 + 0.3291900928291306 + 0.6708099071708693 + 0.3291900928291306 + 0.6708099071708693 + */ + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 0 ), 0.4483, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 0 ), 1.5516, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 1 ), -0.5516, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 1 ), 0.5516, 0.0001 ) ); + + // Layer 4 + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 0 ), 0 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 0 ), 2 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 1 ), -1 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 1 ), 1 ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (SIGMOID): + 0.1050 x2 + 0.3292 <= x4 <= 0.1050 x2 + 0.6708 + 0.1050 x3 + 0.3292 <= x5 <= 0.1050 x3 + 0.6708 + + Layer 4 (ROUND): + x6 - 0.5 <= x8 <= x6 + 0.5 + x7 - 0.5 <= x9 <= x7 + 0.5 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 4: + x8 <= x8 <= x8 + x9 <= x9 <= x9 + + Layer 3: + Using x6 - 0.5 <= x8 <= x6 + 0.5, x7 - 0.5 <= x9 <= x7 + 0.5: + x6 - 0.5 <= x8 <= x6 + 0.5 + x7 - 0.5 <= x9 <= x7 + 0.5 + + Layer 2: + Using x6 = x4 + x5, x7 = x4 - x5: + x4 + x5 - 0.5 <= x8 <= x4 + x5 + 0.5 + x4 - x5 - 0.5 <= x9 <= x4 - x5 + 0.5 + + Layer 1: + Using + 0.1050 x2 + 0.3292 <= x4 <= 0.1050 x2 + 0.6708, + 0.1050 x3 + 0.3292 <= x5 <= 0.1050 x3 + 0.6708: + 0.1050 x2 + 0.1050 x3 + 0.1584 <= x8 <= 0.1050 x2 + 0.1050 x3 + 1.8416 + 0.1050 x2 - 0.1050 x3 - 0.8416 <= x9 <= 0.1050 x2 - 0.1050 x3 + 0.8516 + + Layer 0: + Using x2 = x0 + x1, x3 = x0 - x1: + 0.2100 x0 + 0.1584 <= x8 <= 0.2100 x0 + 1.8416 + 0.2100 x1 - 0.8416 <= x9 <= 0.2100 x1 + 0.8516 + */ + comparePredecessorLayerSymbolicBounds( nlr, + 2, + Vector( { 0.1050, 0, 0, 0.1050 } ), + Vector( { 0.1050, 0, 0, 0.1050 } ), + Vector( { 0.3292, 0.3292 } ), + Vector( { 0.6708, 0.6708 } ) ); + 
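+        // Note: both sigmoid relaxation slopes checked above equal
+        // min( g'(l), g'(u) ) with l = -2, u = 2; since g'( 2 ) =
+        // g( 2 )( 1 - g( 2 ) ) ~ 0.1050, the same coefficient appears in the
+        // lower and the upper symbolic bounds.
+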
comparePredecessorLayerSymbolicBounds( nlr,
+                                               4,
+                                               Vector( { 1, 0, 0, 1 } ),
+                                               Vector( { 1, 0, 0, 1 } ),
+                                               Vector( { -0.5, -0.5 } ),
+                                               Vector( { 0.5, 0.5 } ) );
+
+        compareOutputLayerSymbolicBounds( nlr,
+                                          4,
+                                          Vector( { 1, 0, 0, 1 } ),
+                                          Vector( { 1, 0, 0, 1 } ),
+                                          Vector( { 0, 0 } ),
+                                          Vector( { 0, 0 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          3,
+                                          Vector( { 1, 0, 0, 1 } ),
+                                          Vector( { 1, 0, 0, 1 } ),
+                                          Vector( { -0.5, -0.5 } ),
+                                          Vector( { 0.5, 0.5 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          2,
+                                          Vector( { 1, 1, 1, -1 } ),
+                                          Vector( { 1, 1, 1, -1 } ),
+                                          Vector( { -0.5, -0.5 } ),
+                                          Vector( { 0.5, 0.5 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          1,
+                                          Vector( { 0.1050, 0.1050, 0.1050, -0.1050 } ),
+                                          Vector( { 0.1050, 0.1050, 0.1050, -0.1050 } ),
+                                          Vector( { 0.1584, -0.8416 } ),
+                                          Vector( { 1.8416, 0.8416 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          0,
+                                          Vector( { 0.2100, 0, 0, 0.2100 } ),
+                                          Vector( { 0.2100, 0, 0, 0.2100 } ),
+                                          Vector( { 0.1584, -0.8416 } ),
+                                          Vector( { 1.8416, 0.8416 } ) );
+    }
+
+    void test_symbolic_bound_maps_max_not_fixed()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTMax( nlr, tableau );
+
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 2 );
+
+        // Invoke initializeSymbolicBoundsMaps
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() );
+
+        /*
+          Input ranges:
+
+          x0: [-1, 1]
+          x1: [-1, 2]
+
+          Layers 1, 2, 3:
+
+          x2 = x0 + x1
+          x2.lb = x0 + x1 : [-2, 3]
+          x2.ub = x0 + x1 : [-2, 3]
+
+          x3 = x0 - x1
+          x3.lb = x0 - x1 : [-3, 2]
+          x3.ub = x0 - x1 : [-3, 2]
+
+          Both ReLUs are undecided, bounds are concretized.
+          First ReLU: 3 = ub > -lb = 2, using lower ReLU coefficient of 1.
+          Upper coefficient (first ReLU): 3/( 3--2 ) = 3/5 = 0.6.
+          Second ReLU: 2 = ub <= -lb = 3, using lower ReLU coefficient of 0.
+          Upper coefficient (second ReLU): 2/( 2--3 ) = 2/5 = 0.4
+
+          x2 <= x4 <= 0.6 x2 + 1.2
+          x4.lb = x0 + x1
+          x4.ub = 0.6 ( x0 + x1 ) + 1.2 = 0.6x0 + 0.6x1 + 1.2
+          x4 range: [-2, 3]
+
+          0 <= x5 <= 0.4 x3 + 1.2
+          x5.lb = 0
+          x5.ub = 0.4 ( x0 - x1 ) + 1.2 = 0.4x0 - 0.4x1 + 1.2
+          x5 range: [0, 2]
+
+          Max is not fixed because x5.lb <= x4.ub and x4.lb <= x5.ub
+          Max inherits lower bound from x5, and its upper bound is constant 3.
+
+          x5 <= x6 <= 3
+          x6.lb = 0 : [0, 0]
+          x6.ub = 3 : [3, 3]
+          x6 range: [0, 3]
+
+          Layer 4:
+
+          x7 = 2x6
+          => 2x5 <= x7 <= 6
+          x7.lb = 2 ( 0 ) = 0 : [0, 0]
+          x7.ub = 2 ( 3 ) = 6 : [6, 6]
+          x7 range: [0, 6]
+        */
+
+        List expectedBounds( {
+            Tightening( 2, -2, Tightening::LB ),
+            Tightening( 2, 3, Tightening::UB ),
+            Tightening( 3, -3, Tightening::LB ),
+            Tightening( 3, 2, Tightening::UB ),
+            Tightening( 4, -2, Tightening::LB ),
+            Tightening( 4, 3, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ),
+            Tightening( 5, 2, Tightening::UB ),
+            Tightening( 6, 0, Tightening::LB ),
+            Tightening( 6, 3, Tightening::UB ),
+            Tightening( 7, 0, Tightening::LB ),
+            Tightening( 7, 6, Tightening::UB ),
+
+        } );
+
+        List bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+        /*
+          Symbolic bounds of every activation layer in terms of predecessor:
+
+          Layer 2 (RELU):
+          x2 <= x4 <= 0.6 x2 + 1.2
+          0 <= x5 <= 0.4 x3 + 1.2
+
+          Layer 3 (MAX):
+          x5 <= x6 <= 3
+
+          Symbolic bounds of output layer in terms of every layer (backsubstitution):
+
+          Layer 4:
+          x7 <= x7 <= x7
+
+          Layer 3:
+          Using x7 = 2x6:
+          2x6 <= x7 <= 2x6
+
+          Layer 2:
+          Using x5 <= x6 <= 3:
+          2x5 <= x7 <= 6
+
+          Layer 1:
+          Using 0 <= x5 <= 0.4 x3 + 1.2:
+          0 <= x7 <= 6
+
+          Layer 0:
+          0 <= x7 <= 6
+        */
+        comparePredecessorLayerSymbolicBounds( nlr,
+                                               2,
+                                               Vector( { 1, 0, 0, 0 } ),
+                                               Vector( { 0.6, 0, 0, 0.4 } ),
+                                               Vector( { 0, 0 } ),
+                                               Vector( { 1.2, 1.2 } ) );
+        comparePredecessorLayerSymbolicBounds( nlr,
+                                               3,
+                                               Vector( { 0, 1 } ),
+                                               Vector( { 0, 0 } ),
+                                               Vector( { 0 } ),
+                                               Vector( { 3 } ) );
+
+        compareOutputLayerSymbolicBounds( nlr,
+                                          4,
+                                          Vector( { 1 } ),
+                                          Vector( { 1 } ),
+                                          Vector( { 0 } ),
+                                          Vector( { 0 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          3,
+                                          Vector( { 2 } ),
+                                          Vector( { 2 } ),
+                                          Vector( { 0 } ),
+                                          Vector( { 0 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          2,
+                                          Vector( { 0, 2 } ),
+                                          Vector( { 0, 0 } ),
+                                          Vector( { 0 } ),
+                                          Vector( { 6 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          1,
+                                          Vector( { 0, 0 } ),
+                                          Vector( { 0, 0 } ),
+                                          Vector( { 0 } ),
+                                          Vector( { 6 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          0,
+                                          Vector( { 0, 0 } ),
+                                          Vector( { 0, 0 } ),
+                                          Vector( { 0 } ),
+                                          Vector( { 6 } ) );
+    }
+
+    void test_symbolic_bound_maps_max_fixed()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTMax( nlr, tableau );
+
+        tableau.setLowerBound( 0, 1 );
+        tableau.setUpperBound( 0, 2 );
+        tableau.setLowerBound( 1, -3 );
+        tableau.setUpperBound( 1, -2 );
+
+        // Invoke initializeSymbolicBoundsMaps
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() );
+
+        /*
+          Input ranges:
+
+          x0: [1, 2]
+          x1: [-3, -2]
+
+          Layer 1:
+
+          x2 = x0 + x1
+          x2.lb = x0 + x1 : [-2, 0]
+          x2.ub = x0 + x1 : [-2, 0]
+
+          x3 = x0 - x1
+          x3.lb = x0 - x1 : [3, 5]
+          x3.ub = x0 - x1 : [3, 5]
+
+          First ReLU is negative, bounds become constant 0
+          Second ReLU is positive, bounds survive the activation
+
+          0 <= x4 <= 0
+          x4: all set to 0
+
+          x3 <= x5 <= x3
+          x5.lb = x0 - x1 : [3, 5]
+          x5.ub = x0 - x1 : [3, 5]
+
+          Max is fixed because x5.lb > x4.ub; it inherits x5's bounds
+
+          x5 <= x6 <= x5
+          => x3 <= x6 <= x3
+          x6.lb = x0 - x1 : [3, 5]
+          x6.ub = x0 - x1 : [3, 5]
+
+          Layer 4:
+
+          x7 = 2x6
+          => x7 = 2x5 = 2x3 = 2x0 - 2x1
+          x7.lb = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10]
+          x7.ub = 2 ( x0 - x1 ) = 
2x0 - 2x1 : [6, 10] + */ + + List expectedBounds( { + Tightening( 2, -2, Tightening::LB ), + Tightening( 2, 0, Tightening::UB ), + Tightening( 3, 3, Tightening::LB ), + Tightening( 3, 5, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 3, Tightening::LB ), + Tightening( 5, 5, Tightening::UB ), + Tightening( 6, 3, Tightening::LB ), + Tightening( 6, 5, Tightening::UB ), + Tightening( 7, 6, Tightening::LB ), + Tightening( 7, 10, Tightening::UB ), + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (RELU): + 0 <= x4 <= 0 + x3 <= x5 <= x3 + + Layer 3 (MAX): + x5 <= x6 <= x5 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 4: + x7 <= x7 <= x7 + + Layer 3: + Using x7 = 2x6: + 2x6 <= x7 <= 2x6 + + Layer 2: + Using x5 <= x6 <= x5: + 2x5 <= x7 <= 2x5 + + Layer 1: + Using x3 <= x5 <= x3: + 2x3 <= x7 <= 2x3 + + Layer 0: + Using x3 = x0 - x1 + 2x0 - 2x1 <= x7 <= 2x0 - 2x1 + */ + comparePredecessorLayerSymbolicBounds( nlr, + 2, + Vector( { 0, 0, 0, 1 } ), + Vector( { 0, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + comparePredecessorLayerSymbolicBounds( nlr, + 3, + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + + compareOutputLayerSymbolicBounds( nlr, + 4, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 3, + Vector( { 2 } ), + Vector( { 2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 2, + Vector( { 0, 2 } ), + Vector( { 0, 2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 1, + Vector( { 0, 2 } ), + Vector( { 0, 2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 0, + Vector( { 2, -2 } ), + Vector( { 2, -2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + } + + void test_symbolic_bound_maps_softmax1() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + // Invoke initializeSymbolicBoundsMaps + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + } + + void test_symbolic_bound_maps_softmax2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + { + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.000001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.000001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.000001 ); + + // Invoke initializeSymbolicBoundsMaps + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + + /* + Input ranges: + + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] + + Layer 1: + + x3 = 
x0 - x1 + x2 + 1 + x3.lb = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ] + x3.ub = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ] + x3 range: [ 1.999999, 2.000002 ] + + x4 = -x0 + x1 + x2 + 2 + x4.lb = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ] + x4.ub = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ] + x4 range: [ 2.999999, 3.000002 ] + + x5 = -x0 - x1 - x2 + 3 + x5.lb = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ] + x5.ub = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ] + x5 range: [ -0.000003, 0 ] + */ + + unsigned size = nlr.getLayer( 2 )->getSize(); + Vector sourceLbs = { 1.999899, 2.999899, -0.000003 }; + Vector sourceUbs = { 2.000102, 3.000102, 0.0001 }; + Vector sourceMids = { 2.0000005, 3.0000005, -0.0000015 }; + Vector targetLbs( size, 0 ); + Vector targetUbs( size, 0 ); + Vector symbolicLb( size * size, 0 ); + Vector symbolicUb( size * size, 0 ); + Vector symbolicLowerBias( size, 0 ); + Vector symbolicUpperBias( size, 0 ); + for ( unsigned i = 0; i < size; ++i ) + { + targetLbs[i] = NLR::Layer::linearLowerBound( sourceLbs, sourceUbs, i ); + targetUbs[i] = NLR::Layer::linearUpperBound( sourceLbs, sourceUbs, i ); + } + for ( unsigned i = 0; i < size; ++i ) + { + symbolicLowerBias[i] = + NLR::Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, i ); // Using lse2 + symbolicUpperBias[i] = + NLR::Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, i ); + for ( unsigned j = 0; j < size; ++j ) + { + symbolicLb[size * j + i] = + NLR::Layer::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, i, j ); + symbolicUb[size * j + i] = + NLR::Layer::dLSEUpperbound( sourceMids, targetLbs, targetUbs, i, j ); + symbolicLowerBias[i] -= symbolicLb[size * j + i] * sourceMids[j]; + symbolicUpperBias[i] -= symbolicUb[size * j + i] * sourceMids[j]; + } + } + TS_ASSERT( compareVectors( targetLbs, Vector( { 0.2595, 0.7054, 0.0351 } ) ) ); + TS_ASSERT( compareVectors( targetUbs, Vector( { 0.2595, 0.7054, 0.0351 } ) ) ); + TS_ASSERT( compareVectors( symbolicLb, + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ) ) ); + TS_ASSERT( compareVectors( symbolicUb, + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ) ) ); + TS_ASSERT( + compareVectors( symbolicLowerBias, Vector( { 0.4243, 0.4481, 0.1277 } ) ) ); + TS_ASSERT( + compareVectors( symbolicUpperBias, Vector( { 0.4243, 0.4480, 0.1277 } ) ) ); + + /* + Layer 2: + +0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 + x6.lb = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232 + x6.ub = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232 + x6 range: [ 0.2595, 0.2595 ] + +-0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4480 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 + x7.lb = -0.3660 x0 - 0.4156 x1 + 0.0496 x2 + 0.6062 + x7.ub = -0.3660 x0 - 0.4156 x1 + 0.0496 x2 + 0.6063 + x7 range: [ 0.7054, 0.7054 ] + +-0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= 0.1922 x3 -0.0248 x4 + 0.0339 x5 + 0.1277 + x8.lb = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707 + x8.ub = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707 + x8 range: [ 0.0351, 0.0351 ] + + Layer 3: + + x9 = x6 + x7 + x8 + => x9 = ( 0.1922 - 0.1830 - 0.0091 ) x3 + ( -0.1830 + 0.2078 - 0.0248 ) x4 + ( + -0.0091 - 0.0248 + 0.0339 ) x5 + ( 0.4243 + 0.4481 + 0.1277 ) + + => x9 = 0.0001 x3 + 0 x4 + 0 x5 + 1.0001 + => ( Up to rounding ) 1 <= x9 <= 1. 
+          x9.lb = 1
+          x9.ub = 1
+          x9 range: [ 1, 1 ]
+
+          x10 = - x6 - x7 - x8
+          => x10 = - ( 0.1922 - 0.1830 - 0.0091 ) x3 - ( -0.1830 + 0.2078 - 0.0248 ) x4 - (
+          -0.0091 - 0.0248 + 0.0339 ) x5 - ( 0.4243 + 0.4481 + 0.1277 )
+
+          => x10 = - 0.0001 x3 - 0.0000 x4 - 0.0000 x5 - 1.0001
+          => ( Up to rounding ) -1 <= x10 <= -1.
+          x10.lb = -1
+          x10.ub = -1
+          x10 range: [ -1, -1 ]
+        */
+
+        List expectedBounds( { Tightening( 3, 2, Tightening::LB ),
+                               Tightening( 3, 2, Tightening::UB ),
+                               Tightening( 4, 3, Tightening::LB ),
+                               Tightening( 4, 3, Tightening::UB ),
+                               Tightening( 5, 0, Tightening::LB ),
+                               Tightening( 5, 0, Tightening::UB ),
+                               Tightening( 6, 0.2595, Tightening::LB ),
+                               Tightening( 6, 0.2595, Tightening::UB ),
+                               Tightening( 7, 0.7054, Tightening::LB ),
+                               Tightening( 7, 0.7054, Tightening::UB ),
+                               Tightening( 8, 0.0351, Tightening::LB ),
+                               Tightening( 8, 0.0351, Tightening::UB ),
+                               Tightening( 9, 1, Tightening::LB ),
+                               Tightening( 9, 1, Tightening::UB ),
+                               Tightening( 10, -1, Tightening::LB ),
+                               Tightening( 10, -1, Tightening::UB )
+
+        } );
+
+        List bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+        /*
+          Symbolic bounds of every activation layer in terms of predecessor:
+
+          Layer 2 (SOFTMAX):
+0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243
+-0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481
+-0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277
+
+          Symbolic bounds of output layer in terms of every layer (backsubstitution):
+
+          Layer 3:
+          x9 <= x9 <= x9
+          x10 <= x10 <= x10
+
+          Layer 2:
+          Using x9 = x6 + x7 + x8, x10 = -x6 - x7 - x8:
+          x6 + x7 + x8 <= x9 <= x6 + x7 + x8
+          -x6 - x7 - x8 <= x10 <= -x6 - x7 - x8
+
+          Layer 1:
+          Using
+0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243.
+-0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481. 
+-0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= 0.1922 x3 -0.0248 x4 + 0.0339 x5 + 0.1277: + 1 <= x9 <= 1 + -1 <= x10 <= -1 + + Layer 0: + 1 <= x9 <= 1 + -1 <= x10 <= -1 + */ + comparePredecessorLayerSymbolicBounds( nlr, + 2, + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ), + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ), + Vector( { 0.4243, 0.4481, 0.1277 } ), + Vector( { 0.4243, 0.4480, 0.1277 } ) ); + + compareOutputLayerSymbolicBounds( nlr, + 3, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 2, + Vector( { 1, -1, 1, -1, 1, -1 } ), + Vector( { 1, -1, 1, -1, 1, -1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 1, + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 1, -1 } ), + Vector( { 1, -1 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 0, + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 1, -1 } ), + Vector( { 1, -1 } ) ); + } + { + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "er" ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.000001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.000001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.000001 ); + + // Invoke initializeSymbolicBoundsMaps + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + + /* + Input ranges: + + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] + + Layer 1: + + x3 = x0 - x1 + x2 + 1 + x3.lb = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ] + x3.ub = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ] + x3 range: [ 1.999999, 2.000002 ] + + x4 = -x0 + x1 + x2 + 2 + x4.lb = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ] + x4.ub = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ] + x4 range: [ 2.999999, 3.000002 ] + + x5 = -x0 - x1 - x2 + 3 + x5.lb = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ] + x5.ub = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ] + x5 range: [ -0.000003, 0 ] + */ + + unsigned size = nlr.getLayer( 2 )->getSize(); + Vector sourceLbs = { 1.999899, 2.999899, -0.000003 }; + Vector sourceUbs = { 2.000102, 3.000102, 0.0001 }; + Vector sourceMids = { 2.0000005, 3.0000005, -0.0000015 }; + Vector targetLbs( size, 0 ); + Vector targetUbs( size, 0 ); + Vector symbolicLb( size * size, 0 ); + Vector symbolicUb( size * size, 0 ); + Vector symbolicLowerBias( size, 0 ); + Vector symbolicUpperBias( size, 0 ); + for ( unsigned i = 0; i < size; ++i ) + { + targetLbs[i] = NLR::Layer::linearLowerBound( sourceLbs, sourceUbs, i ); + targetUbs[i] = NLR::Layer::linearUpperBound( sourceLbs, sourceUbs, i ); + } + for ( unsigned i = 0; i < size; ++i ) + { + symbolicLowerBias[i] = + NLR::Layer::ERLowerBound( sourceMids, sourceLbs, sourceUbs, i ); // Using er + symbolicUpperBias[i] = + NLR::Layer::ERUpperBound( sourceMids, targetLbs, targetUbs, i ); + for ( unsigned j = 0; j < size; ++j ) + { + symbolicLb[size * j + i] = + NLR::Layer::dERLowerBound( sourceMids, sourceLbs, sourceUbs, i, j ); + symbolicUb[size * j + i] = + NLR::Layer::dERUpperBound( sourceMids, targetLbs, targetUbs, i, j ); + symbolicLowerBias[i] -= symbolicLb[size * j + i] * sourceMids[j]; + symbolicUpperBias[i] 
-= symbolicUb[size * j + i] * sourceMids[j]; + } + } + TS_ASSERT( compareVectors( targetLbs, Vector( { 0.2595, 0.7054, 0.0351 } ) ) ); + TS_ASSERT( compareVectors( targetUbs, Vector( { 0.2595, 0.7054, 0.0351 } ) ) ); + TS_ASSERT( compareVectors( symbolicLb, + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ) ) ); + TS_ASSERT( compareVectors( symbolicUb, + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ) ) ); + TS_ASSERT( + compareVectors( symbolicLowerBias, Vector( { 0.4243, 0.4481, 0.1277 } ) ) ); + TS_ASSERT( + compareVectors( symbolicUpperBias, Vector( { 0.4243, 0.4480, 0.1277 } ) ) ); + + /* + Layer 2: + +0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 + x6.lb = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232 + x6.ub = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232 + x6 range: [ 0.2595, 0.2595 ] + +-0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4480 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 + x7.lb = -0.3660 x0 - 0.4156 x1 + 0.0496 x2 + 0.6062 + x7.ub = -0.3660 x0 - 0.4156 x1 + 0.0496 x2 + 0.6063 + x7 range: [ 0.7054, 0.7054 ] + +-0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= 0.1922 x3 -0.0248 x4 + 0.0339 x5 + 0.1277 + x8.lb = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707 + x8.ub = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707 + x8 range: [ 0.0351, 0.0351 ] + + Layer 3: + + x9 = x6 + x7 + x8 + => x9 = ( 0.1922 - 0.1830 - 0.0091 ) x3 + ( -0.1830 + 0.2078 - 0.0248 ) x4 + ( + -0.0091 - 0.0248 + 0.0339 ) x5 + ( 0.4243 + 0.4481 + 0.1277 ) + + => x9 = 0.0001 x3 + 0 x4 + 0 x5 + 1.0001 + => ( Up to rounding ) 1 <= x9 <= 1. + x9.lb = 1 + x9.ub = 1 + x9 range: [ 1, 1 ] + + x10 = - x6 - x7 - x8 + => x10 = - ( 0.1922 - 0.1830 - 0.0091 ) x3 - ( -0.1830 + 0.2078 - 0.0248 ) x4 - ( + -0.0091 - 0.0248 + 0.0339 ) x5 - ( 0.4243 + 0.4481 + 0.1277 ) + + => x10 = - 0.0001 x3 - 0.0000 x4 - 0.0000 x5 - 1.0001 + => ( Up to rounding ) 1 <= x10 <= 1. 
+ x10.lb = 1 + x10.ub = 1 + x10 range: [ -1, -1 ] + */ + List expectedBounds( { Tightening( 3, 2, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 0, Tightening::UB ), + Tightening( 6, 0.2595, Tightening::LB ), + Tightening( 6, 0.2595, Tightening::UB ), + Tightening( 7, 0.7054, Tightening::LB ), + Tightening( 7, 0.7054, Tightening::UB ), + Tightening( 8, 0.0351, Tightening::LB ), + Tightening( 8, 0.0351, Tightening::UB ), + Tightening( 9, 1, Tightening::LB ), + Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), + Tightening( 10, -1, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (SOFTMAX): +0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 +-0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 +-0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= 0.1922 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x9 <= x9 <= x9 + x10 <= x10 <= x10 + + Layer 2: + Using x9 = x6 + x7 + x8, x10 = -x6 - x7 - x8: + x6 + x7 + x8 <= x9 <= x6 + x7 + x8 + -x6 - x7 - x8 <= x10 <= -x6 - x7 - x8 + + Layer 1: + Using +0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243. +-0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481. +-0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= 0.1922 x3 -0.0248 x4 + 0.0339 x5 + 0.1277: + 1 <= x9 <= 1 + -1 <= x10 <= -1 + + Layer 0: + 1 <= x9 <= 1 + -1 <= x10 <= -1 + */ + comparePredecessorLayerSymbolicBounds( nlr, + 2, + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ), + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ), + Vector( { 0.4243, 0.4481, 0.1277 } ), + Vector( { 0.4243, 0.4480, 0.1277 } ) ); + + compareOutputLayerSymbolicBounds( nlr, + 3, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 2, + Vector( { 1, -1, 1, -1, 1, -1 } ), + Vector( { 1, -1, 1, -1, 1, -1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 1, + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 1, -1 } ), + Vector( { 1, -1 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 0, + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 1, -1 } ), + Vector( { 1, -1 } ) ); + } + } + + void test_symbolic_bound_maps_softmax3() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax2( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.00001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.00001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.00001 ); + + // Invoke initializeSymbolicBoundsMaps + TS_ASSERT_THROWS_NOTHING( 
nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + + /* + Input ranges: + + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] + + Layer 1: + + x3 = x0 - x1 + x2 + 1 + x3.lb = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ] + x3.ub = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ] + x3 range: [ 1.999999, 2.000002 ] + + x4 = -x0 + x1 + x2 + 2 + x4.lb = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ] + x4.ub = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ] + x4 range: [ 2.999999, 3.000002 ] + + x5 = -x0 - x1 - x2 + 3 + x5.lb = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ] + x5.ub = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ] + x5 range: [ -0.000003, 0 ] + + x6 = -x0 - x1 - x2 + 2 + x6.lb = -x0 - x1 - x2 + 2 : [ -1.000003, -1 ] + x6.ub = -x0 - x1 - x2 + 2 : [ -1.000003, -1 ] + x6 range: [ -1.000003, -1 ] + + x7 = -x0 - x1 - x2 + 1 + x7.lb = -x0 - x1 - x2 + 1 : [ -2.000003, -2 ] + x7.ub = -x0 - x1 - x2 + 1 : [ -2.000003, -2 ] + x7 range: [ -2.000003, -2 ] + */ + + // First Sigmoid: x8 x10 x12 = softmax( x3, x5, x7 ). + unsigned size = nlr.getLayer( 2 )->getActivationSources( 0 ).size(); + Vector sourceLbs = { 1.999899, -0.000003, -2.000103 }; + Vector sourceUbs = { 2.000102, 0.0001, -1.999 }; + Vector sourceMids = { 2.0000005, -0.0000015, -2.0000015 }; + Vector targetLbs( size, 0 ); + Vector targetUbs( size, 0 ); + Vector symbolicLb( size * size, 0 ); + Vector symbolicUb( size * size, 0 ); + Vector symbolicLowerBias( size, 0 ); + Vector symbolicUpperBias( size, 0 ); + for ( unsigned i = 0; i < size; ++i ) + { + targetLbs[i] = NLR::Layer::linearLowerBound( sourceLbs, sourceUbs, i ); + targetUbs[i] = NLR::Layer::linearUpperBound( sourceLbs, sourceUbs, i ); + } + for ( unsigned i = 0; i < size; ++i ) + { + symbolicLowerBias[i] = + NLR::Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, i ); // Using lse2 + symbolicUpperBias[i] = NLR::Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, i ); + for ( unsigned j = 0; j < size; ++j ) + { + symbolicLb[size * j + i] = + NLR::Layer::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, i, j ); + symbolicUb[size * j + i] = + NLR::Layer::dLSEUpperbound( sourceMids, targetLbs, targetUbs, i, j ); + symbolicLowerBias[i] -= symbolicLb[size * j + i] * sourceMids[j]; + symbolicUpperBias[i] -= symbolicUb[size * j + i] * sourceMids[j]; + } + } + TS_ASSERT( compareVectors( targetLbs, Vector( { 0.8668, 0.1173, 0.0159 } ) ) ); + TS_ASSERT( compareVectors( targetUbs, Vector( { 0.8668, 0.1173, 0.0159 } ) ) ); + TS_ASSERT( compareVectors( symbolicLb, + Vector( { 0.1155, + -0.1017, + -0.0138, + -0.1017, + 0.1035, + -0.0019, + -0.0138, + -0.0019, + 0.0156 } ) ) ); + TS_ASSERT( compareVectors( symbolicUb, + Vector( { 0.1154, + -0.1017, + -0.0138, + -0.1017, + 0.1036, + -0.0019, + -0.0138, + -0.0019, + 0.0156 } ) ) ); + TS_ASSERT( + compareVectors( symbolicLowerBias, Vector( { 0.6084, 0.3170, 0.0747 } ) ) ); + TS_ASSERT( + compareVectors( symbolicUpperBias, Vector( { 0.6084, 0.3170, 0.0747 } ) ) ); + + // Second Sigmoid: x9 x11 = softmax( x4, x6 ). 
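+        // As with the first group, the loop below is, in effect, a first-order
+        // expansion of the LSE bounds at sourceMids: it fills in the
+        // coefficients and then subtracts coefficient * mid from the bound's
+        // value at the midpoint, leaving only the constant bias term.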
+        size = nlr.getLayer( 2 )->getActivationSources( 1 ).size();
+        sourceLbs = Vector( { 2.999899, -1.000103 } );
+        sourceUbs = Vector( { 3.000102, -0.9999 } );
+        sourceMids = Vector( { 3.0000005, -1.0000015 } );
+        targetLbs = Vector( size, 0 );
+        targetUbs = Vector( size, 0 );
+        symbolicLb = Vector( size * size, 0 );
+        symbolicUb = Vector( size * size, 0 );
+        symbolicLowerBias = Vector( size, 0 );
+        symbolicUpperBias = Vector( size, 0 );
+        for ( unsigned i = 0; i < size; ++i )
+        {
+            targetLbs[i] = NLR::Layer::linearLowerBound( sourceLbs, sourceUbs, i );
+            targetUbs[i] = NLR::Layer::linearUpperBound( sourceLbs, sourceUbs, i );
+        }
+        for ( unsigned i = 0; i < size; ++i )
+        {
+            symbolicLowerBias[i] =
+                NLR::Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, i ); // Using lse2
+            symbolicUpperBias[i] = NLR::Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, i );
+            for ( unsigned j = 0; j < size; ++j )
+            {
+                symbolicLb[size * j + i] =
+                    NLR::Layer::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, i, j );
+                symbolicUb[size * j + i] =
+                    NLR::Layer::dLSEUpperbound( sourceMids, targetLbs, targetUbs, i, j );
+                symbolicLowerBias[i] -= symbolicLb[size * j + i] * sourceMids[j];
+                symbolicUpperBias[i] -= symbolicUb[size * j + i] * sourceMids[j];
+            }
+        }
+        TS_ASSERT( compareVectors( targetLbs, Vector( { 0.9820, 0.0180 } ) ) );
+        TS_ASSERT( compareVectors( targetUbs, Vector( { 0.9820, 0.0180 } ) ) );
+        TS_ASSERT(
+            compareVectors( symbolicLb, Vector( { 0.0177, -0.0177, -0.0177, 0.0177 } ) ) );
+        TS_ASSERT(
+            compareVectors( symbolicUb, Vector( { 0.0177, -0.0177, -0.0177, 0.0177 } ) ) );
+        TS_ASSERT( compareVectors( symbolicLowerBias, Vector( { 0.9114, 0.0886 } ) ) );
+        TS_ASSERT( compareVectors( symbolicUpperBias, Vector( { 0.9114, 0.0886 } ) ) );
+
+        /*
+          Layer 2:
+
+          First softmax: x8 x10 x12 = softmax( x3, x5, x7 ).
+0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 <= x8 <= 0.1154 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084
+          x8.lb = 0.2310 x0 + 0.0001 x1 + 0.2310 x2 + 0.4051
+          x8.ub = 0.2310 x0 + 0.0000 x1 + 0.2310 x2 + 0.4050
+          x8 range: [ 0.8668, 0.8668 ]
+
+-0.1017 x3 + 0.1035 x5 - 0.0019 x7 + 0.3170 <= x10 <= -0.1017 x3 + 0.1036 x5 - 0.0019 x7 + 0.3170
+          x10.lb = -0.2033 x0 + 0.0001 x1 - 0.2033 x2 + 0.5239
+          x10.ub = -0.2033 x0 + 0.0000 x1 - 0.2033 x2 + 0.5241
+          x10 range: [ 0.1173, 0.1173 ]
+
+-0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 <= x12 <= -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747
+          x12.lb = -0.0275 x0 + 0.0001 x1 - 0.0275 x2 + 0.0708
+          x12.ub = -0.0275 x0 + 0.0001 x1 - 0.0275 x2 + 0.0708
+          x12 range: [ 0.0159, 0.0159 ]
+
+          Second softmax: x9 x11 = softmax( x4, x6 ).
+0.0177 x4 - 0.0177 x6 + 0.9114 <= x9 <= 0.0177 x4 - 0.0177 x6 + 0.9114
+          x9.lb = 0 x0 + 0.0354 x1 + 0.0354 x2 + 0.9114
+          x9.ub = 0 x0 + 0.0354 x1 + 0.0354 x2 + 0.9114
+          x9 range: [ 0.9820, 0.9820 ]
+
+-0.0177 x4 + 0.0177 x6 + 0.0886 <= x11 <= -0.0177 x4 + 0.0177 x6 + 0.0886
+          x11.lb = 0 x0 - 0.0354 x1 - 0.0354 x2 + 0.0886
+          x11.ub = 0 x0 - 0.0354 x1 - 0.0354 x2 + 0.0886
+          x11 range: [ 0.0180, 0.0180 ]
+
+          Layer 3:
+
+          x13 = x8 + x10 + x12
+          => x13 = ( 0.1155 - 0.1017 - 0.0138 ) x3 + ( -0.1017 + 0.1035 - 0.0019 ) x5 +
+          ( -0.0138 - 0.0019 + 0.0156 ) x7 + ( 0.6084 + 0.3170 + 0.0747 )
+
+          => x13 = 0 x3 - 0.0001 x5 - 0.0001 x7 + 1.0001
+          => ( Up to rounding ) 1 <= x13 <= 1. 
+ x13.lb = 1 + x13.ub = 1 + x13 range: [ 1, 1 ] + + x14 = - x8 - x10 - x12 + => x14 = - ( 0.1155 - 0.1017 - 0.0138 ) x3 - ( -0.1017 + 0.1035 - 0.0019 ) x5 + - ( -0.0138 - 0.0019 + 0.0156 ) x7 - ( 0.6084 + 0.3170 + 0.0747 ) + + => x14 = 0 x3 + 0.0001 x5 + 0.0001 x7 - 1.0001 + => ( Up to rounding ) -1 <= x14 <= -1. + x14.lb = -1 + x14.ub = -1 + x14 range: [ -1, -1 ] + + x15 = x9 + x11 + => x15 = ( 0.0177 - 0.0177 ) x4 + ( -0.0177 + 0.0177 ) x6 + ( 0.9114 + 0.0886 ) + + => x15 = 0 x4 + 0 x6 + 1 + => ( Up to rounding ) 1 <= x15 <= 1. + x15.lb = 1 + x15.ub = 1 + x15 range: [ 1, 1 ] + + x16 = - x9 - x11 + => x16 = - ( 0.0177 - 0.0177 ) x4 - ( -0.0177 + 0.0177 ) x6 - ( 0.9114 + 0.0886 ) + + => x16 = 0 x4 + 0 x6 - 1 + => ( Up to rounding ) -1 <= x16 <= -1. + x16.lb = -1 + x16.ub = -1 + x16 range: [ -1, -1 ] + */ + + List expectedBounds( { + Tightening( 3, 2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, -1, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, -2, Tightening::UB ), + Tightening( 8, 0.86681, Tightening::LB ), Tightening( 8, 0.86682, Tightening::UB ), + Tightening( 9, 0.98201, Tightening::LB ), Tightening( 9, 0.98201, Tightening::UB ), + Tightening( 10, 0.11731, Tightening::LB ), Tightening( 10, 0.11731, Tightening::UB ), + Tightening( 11, 0.017985, Tightening::LB ), Tightening( 11, 0.017986, Tightening::UB ), + Tightening( 12, 0.015875, Tightening::LB ), Tightening( 12, 0.015876, Tightening::UB ), + Tightening( 13, 1, Tightening::LB ), Tightening( 13, 1, Tightening::UB ), + Tightening( 14, -1, Tightening::LB ), Tightening( 14, -1, Tightening::UB ), + Tightening( 15, 1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, -1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (SOFTMAX): +0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 <= x8 <= 0.1154 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 +0.0177 x4 - 0.0177 x6 + 0.9114 <= x9 <= 0.0177 x4 - 0.0177 x6 + 0.9114 +-0.1017 x3 + 0.1035 x5 - 0.0019 x7 + 0.3170 <= x10 <= -0.1017 x3 + 0.1036 x5 - 0.0019 x7 + 0.3170 +-0.0177 x4 + 0.0177 x6 + 0.0886 <= x11 <= -0.0177 x4 + 0.0177 x6 + 0.0886 +-0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 <= x12 <= -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x13 <= x13 <= x13 + x14 <= x14 <= x14 + x15 <= x15 <= x15 + x16 <= x16 <= x16 + + Layer 2: + Using x13 = x8 + x10 + x12, x14 = -x8 - x10 - x12, x15 = x9 + x11, x16 = -x9 - x11: + x8 + x10 + x12 <= x13 <= x8 + x10 + x12 + -x8 - x10 - x12 <= x14 <= -x8 - x10 - x12 + x9 + x11 <= x15 <= x9 + x11 + -x9 - x11 <= x16 <= -x9 - x11 + + Layer 1: + Using +0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 <= x8 <= 0.1154 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 +0.0177 x4 - 0.0177 x6 + 0.9114 <= x9 <= 0.0177 x4 - 0.0177 x6 + 0.9114 +-0.1017 x3 + 0.1035 x5 - 0.0019 x7 + 0.3170 <= x10 <= -0.1017 x3 + 0.1036 x5 - 0.0019 x7 + 0.3170 +-0.0177 x4 + 0.0177 x6 + 0.0886 <= x11 <= -0.0177 x4 + 0.0177 x6 + 0.0886 +-0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 <= x12 <= -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 + 1 <= x13 <= 1 + 
-1 <= x14 <= -1 + 1 <= x15 <= 1 + -1 <= x16 <= -1 + + Layer 0: + 1 <= x13 <= 1 + -1 <= x14 <= -1 + 1 <= x15 <= 1 + -1 <= x16 <= -1 + */ + comparePredecessorLayerSymbolicBounds( + nlr, + 2, + Vector( { 0.1155, 0.0000, -0.1017, 0.0000, -0.0138, 0.0000, 0.0177, + 0.0000, -0.0177, 0.0000, -0.1017, 0.0000, 0.1035, 0.0000, + -0.0019, 0.0000, -0.0177, 0.0000, 0.0177, 0.0000, -0.0138, + 0.0000, -0.0019, 0.0000, 0.0156 } ), + Vector( { 0.1155, 0.0000, -0.1017, 0.0000, -0.0138, 0.0000, 0.0177, + 0.0000, -0.0177, 0.0000, -0.1017, 0.0000, 0.1035, 0.0000, + -0.0019, 0.0000, -0.0177, 0.0000, 0.0177, 0.0000, -0.0138, + 0.0000, -0.0019, 0.0000, 0.0156 } ), + Vector( { 0.6084, 0.9114, 0.3170, 0.0886, 0.0747 } ), + Vector( { 0.6084, 0.9114, 0.3170, 0.0886, 0.0747 } ) ); + + compareOutputLayerSymbolicBounds( + nlr, + 3, + Vector( { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 } ), + Vector( { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 } ), + Vector( { 0, 0, 0, 0 } ), + Vector( { 0, 0, 0, 0 } ) ); + compareOutputLayerSymbolicBounds( + nlr, + 2, + Vector( { 1, -1, 0, 0, 0, 0, 1, -1, 1, -1, 0, 0, 0, 0, 1, -1, 1, -1, 0, 0 } ), + Vector( { 1, -1, 0, 0, 0, 0, 1, -1, 1, -1, 0, 0, 0, 0, 1, -1, 1, -1, 0, 0 } ), + Vector( { 0, 0, 0, 0 } ), + Vector( { 0, 0, 0, 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 1, + Vector( 20, 0 ), + Vector( 20, 0 ), + Vector( { 1, -1, 1, -1 } ), + Vector( { 1, -1, 1, -1 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 0, + Vector( 12, 0 ), + Vector( 12, 0 ), + Vector( { 1, -1, 1, -1 } ), + Vector( { 1, -1, 1, -1 } ) ); + } + + void test_symbolic_bound_maps_bilinear() + { + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTBilinear( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -2 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke initializeSymbolicBoundsMaps + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + + /* + Input ranges: + + x0: [1, 2] + x1: [-2, 1] + + Layers 1, 2: + + x2 = x0 - 2x1 + x2.lb = x0 - 2x1 : [-1, 6] + x2.ub = x0 - 2x1 : [-1, 6] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [-1, 3] + x3.ub = x0 + x1 : [-1, 3] + + Coefficients for bilinear layer: + Lower bound: + alpha_l = x3.lb = -1 + beta = x2.lb = -1 + gamma_l = -x2.lb x3.lb = --1 * -1 = -1 + + Upper bound: + alpha_u = x3.ub = 3 + beta = x2.lb = -1 + gamma_u = -x2.lb x3.ub = --1 * 3 = 3 + + -x2 - x3 - 1 <= x4 <= 3x2 - x3 + 3 + x4.lb = -1 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + -1 = -2x0 + x1 - 1 : [-7, -2] + x4.ub = 3 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + 3 = 2x0 - 7x1 + 3 : [0, 21] + x4 range: [-6, 18] + + Layer 3: + + x5 = -x4 + => -3x2 + x3 - 3 <= x4 <= x2 + x3 + 1 + x5.lb = -1 ( 2x0 - 5x1 + 3 ) = -2x0 + 7x1 - 3 : [-21, 0] + x5.ub = -1 ( -2x0 + x1 - 1 ) = 2x0 - x1 + 1 : [2, 7] + x5 range: [-18, 6] + */ + + List expectedBounds( { Tightening( 2, -1, Tightening::LB ), + Tightening( 2, 6, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 3, Tightening::UB ), + Tightening( 4, -6, Tightening::LB ), + Tightening( 4, 18, Tightening::UB ), + Tightening( 5, -18, Tightening::LB ), + Tightening( 5, 6, Tightening::UB ) } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (BILINEAR): + -x2 - x3 - 1 <= x5 <= 3x2 - x3 + 3 + + Symbolic bounds of 
output layer in terms of every layer (backsubstitution):
+
+          Layer 3:
+          x5 <= x5 <= x5
+
+          Layer 2:
+          Using x5 = -x4:
+          -x4 <= x5 <= -x4
+
+          Layer 1:
+          Using -x2 - x3 - 1 <= x4 <= 3x2 - x3 + 3:
+          -3x2 + x3 - 3 <= x5 <= x2 + x3 + 1
+
+          Layer 0:
+          Using x2 = x0 - 2x1, x3 = x0 + x1:
+          -2x0 + 7x1 - 3 <= x5 <= 2x0 - x1 + 1
+        */
+        comparePredecessorLayerSymbolicBounds( nlr,
+                                               2,
+                                               Vector( { -1, -1 } ),
+                                               Vector( { 3, -1 } ),
+                                               Vector( { -1 } ),
+                                               Vector( { 3 } ) );
+
+        compareOutputLayerSymbolicBounds( nlr,
+                                          3,
+                                          Vector( { 1 } ),
+                                          Vector( { 1 } ),
+                                          Vector( { 0 } ),
+                                          Vector( { 0 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          2,
+                                          Vector( { -1 } ),
+                                          Vector( { -1 } ),
+                                          Vector( { 0 } ),
+                                          Vector( { 0 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          1,
+                                          Vector( { -3, 1 } ),
+                                          Vector( { 1, 1 } ),
+                                          Vector( { -3 } ),
+                                          Vector( { 1 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          0,
+                                          Vector( { -2, 7 } ),
+                                          Vector( { 2, -1 } ),
+                                          Vector( { -3 } ),
+                                          Vector( { 1 } ) );
+    }
+
+    void test_parameterised_symbolic_bound_maps_relus_all_active()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTRelu( nlr, tableau );
+
+        tableau.setLowerBound( 0, 4 );
+        tableau.setUpperBound( 0, 6 );
+        tableau.setLowerBound( 1, 1 );
+        tableau.setUpperBound( 1, 5 );
+
+        unsigned paramCount = nlr.getNumberOfParameters();
+        Vector coeffs( paramCount, 0.5 );
+
+        // Invoke parameterised initializeSymbolicBoundsMaps
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) );
+
+        /*
+          Input ranges:
+
+          x0: [4, 6]
+          x1: [1, 5]
+
+          Layers 1, 2:
+
+          x2 = 2x0 + 3x1
+          x2.lb = 2x0 + 3x1 : [11, 27]
+          x2.ub = 2x0 + 3x1 : [11, 27]
+
+          x3 = x0 + x1
+          x3.lb = x0 + x1 : [5, 11]
+          x3.ub = x0 + x1 : [5, 11]
+
+          Both ReLUs active, bounds survive the activations:
+
+          x2 <= x4 <= x2
+          x4.lb = 2x0 + 3x1 : [11, 27]
+          x4.ub = 2x0 + 3x1 : [11, 27]
+
+          x3 <= x5 <= x3
+          x5.lb = x0 + x1 : [5, 11]
+          x5.ub = x0 + x1 : [5, 11]
+
+          Layer 3:
+
+          x6 = x4 - x5
+          => x2 - x3 <= x6 <= x2 - x3
+          x6.lb = x0 + 2x1 : [6, 16]
+          x6.ub = x0 + 2x1 : [6, 16]
+        */
+
+        List expectedBounds( {
+            Tightening( 2, 11, Tightening::LB ),
+            Tightening( 2, 27, Tightening::UB ),
+            Tightening( 3, 5, Tightening::LB ),
+            Tightening( 3, 11, Tightening::UB ),
+
+            Tightening( 4, 11, Tightening::LB ),
+            Tightening( 4, 27, Tightening::UB ),
+            Tightening( 5, 5, Tightening::LB ),
+            Tightening( 5, 11, Tightening::UB ),
+
+            Tightening( 6, 6, Tightening::LB ),
+            Tightening( 6, 16, Tightening::UB ),
+        } );
+
+        List bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+        /*
+          Symbolic bounds of every activation layer in terms of predecessor:
+
+          Layer 2 (RELU):
+          x2 <= x4 <= x2
+          x3 <= x5 <= x3
+
+          Symbolic bounds of output layer in terms of every layer (backsubstitution):
+
+          Layer 3:
+          x6 <= x6 <= x6
+
+          Layer 2:
+          Using x6 = x4 - x5:
+          x4 - x5 <= x6 <= x4 - x5
+
+          Layer 1:
+          Using x2 <= x4 <= x2, x3 <= x5 <= x3:
+          x2 - x3 <= x6 <= x2 - x3
+
+          Layer 0:
+          Using x2 = 2x0 + 3x1, x3 = x0 + x1:
+          x0 + 2x1 <= x6 <= x0 + 2x1
+        */
+        comparePredecessorLayerSymbolicBounds( nlr,
+                                               2,
+                                               Vector( { 1, 0, 0, 1 } ),
+                                               Vector( { 1, 0, 0, 1 } ),
+                                               Vector( { 0, 0 } ),
+                                               Vector( { 0, 0 } ) );
+
+        compareOutputLayerSymbolicBounds( nlr,
+                                          3,
+                                          Vector( { 1 } ),
+                                          Vector( { 1 } ),
+                                          Vector( { 0 } ),
+                                          Vector( { 0 } ) );
+
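+        // Both ReLUs are active here, so each backsubstitution step below
+        // composes an identity activation map with a weighted-sum layer.
+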
compareOutputLayerSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 1, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 0, + Vector( { 1, 2 } ), + Vector( { 1, 2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + } + + void test_parameterised_symbolic_bound_maps_relus_active_and_inactive() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised initializeSymbolicBoundsMaps + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = 2x0 + 3x1 - 30 + x2.lb = 2x0 + 3x1 - 30 : [-19, -3] + x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is inactive, bounds get zeroed + Second ReLU is active, bounds surive the activation + + 0 <= x4 <= 0 + x4.lb = 0 + x4.ub = 0 + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + ==> -x3 <= x6 <= -x3 + x6.lb = -x0 - x1 : [-11, -5] + x6.ub = -x0 - x1 : [-11, -5] + */ + + List expectedBounds( { + Tightening( 2, -19, Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (RELU): + 0 <= x4 <= 0 + x3 <= x5 <= x3 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x6 <= x6 <= x6 + + Layer 2: + Using x6 = x5 - x4: + x4 - x5 <= x6 <= x4 - x5 + + Layer 1: + Using x2 <= x4 <= x2, x3 <= x5 <= x3: + -x3 <= x6 <= -x3 + + Layer 0: + Using x3 = x0 + x1: + -x0 - x1 <= x6 <= -x0 - x1 + */ + comparePredecessorLayerSymbolicBounds( nlr, + 2, + Vector( { 0, 0, 0, 1 } ), + Vector( { 0, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + compareOutputLayerSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 1, + Vector( { 0, -1 } ), + Vector( { 0, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 0, + Vector( { -1, -1 } ), + Vector( { -1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + } + + void test_parameterised_symbolic_bound_maps_relus_active_and_not_fixed() + { + 
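+        // With every parameter set to 0.5, an undecided ReLU is given the
+        // parameterised lower bound 0.5 x in place of the default slope, while
+        // the upper bound keeps the triangle relaxation
+        // ub / ( ub - lb ) * ( x - lb ), as the walkthrough below assumes.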
Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTRelu( nlr, tableau );
+
+        tableau.setLowerBound( 0, 4 );
+        tableau.setUpperBound( 0, 6 );
+        tableau.setLowerBound( 1, 1 );
+        tableau.setUpperBound( 1, 5 );
+
+        // Strong negative bias for x2, which is node (1,0)
+        nlr.setBias( 1, 0, -15 );
+
+        unsigned paramCount = nlr.getNumberOfParameters();
+        Vector<double> coeffs( paramCount, 0.5 );
+
+        // Invoke parameterised initializeSymbolicBoundsMaps
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) );
+
+        /*
+          Input ranges:
+
+          x0: [4, 6]
+          x1: [1, 5]
+
+          Layers 1, 2:
+
+          x2 = 2x0 + 3x1 - 15
+          x2.lb = 2x0 + 3x1 - 15 : [-4, 12]
+          x2.ub = 2x0 + 3x1 - 15 : [-4, 12]
+
+          x3 = x0 + x1
+          x3.lb = x0 + x1 : [5, 11]
+          x3.ub = x0 + x1 : [5, 11]
+
+          First ReLU is undecided, bound is concretized. Using custom ReLU lower
+          coefficient of 0.5. Upper coefficient: 12/(12--4) = 12/16 = 0.75
+          Second ReLU is active, bounds survive the activation
+
+          x4 range: [-2, 12]
+          0.5 x2 <= x4 <= 0.75 x2 + 3
+          x4.lb = 0.5 ( 2x0 + 3x1 - 15 ) = x0 + 1.5 x1 - 7.5
+          x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25
+
+          x3 <= x5 <= x3
+          x5.lb = x0 + x1 : [5, 11]
+          x5.ub = x0 + x1 : [5, 11]
+
+          Layer 3:
+
+          x6 = x4 - x5
+          ==> 0.5 x2 - x3 <= x6 <= 0.75x2 - x3 + 3
+          x6.lb = 0.5 x1 - 7.5
+          x6.ub = 0.5x0 + 1.25x1 - 8.25
+
+          x6 range: [0.5 - 7.5 = -7, 3 + 6.25 - 8.25 = 1] = [-7, 1]
+        */
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, -4, Tightening::LB ),
+            Tightening( 2, 12, Tightening::UB ),
+            Tightening( 3, 5, Tightening::LB ),
+            Tightening( 3, 11, Tightening::UB ),
+
+            Tightening( 4, -2, Tightening::LB ),
+            Tightening( 4, 12, Tightening::UB ),
+            Tightening( 5, 5, Tightening::LB ),
+            Tightening( 5, 11, Tightening::UB ),
+
+            Tightening( 6, -7, Tightening::LB ),
+            Tightening( 6, 1, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+        /*
+          Symbolic bounds of every activation layer in terms of predecessor:
+
+          Layer 2 (RELU):
+          0.5 x2 <= x4 <= 0.75 x2 + 3
+          x3 <= x5 <= x3
+
+          Symbolic bounds of output layer in terms of every layer (backsubstitution):
+
+          Layer 3:
+          x6 <= x6 <= x6
+
+          Layer 2:
+          Using x6 = x4 - x5:
+          x4 - x5 <= x6 <= x4 - x5
+
+          Layer 1:
+          Using 0.5 x2 <= x4 <= 0.75 x2 + 3, x3 <= x5 <= x3:
+          0.5 x2 - x3 <= x6 <= 0.75x2 - x3 + 3
+
+          Layer 0:
+          Using x2 = 2x0 + 3x1 - 15, x3 = x0 + x1:
+          0.5 x1 - 7.5 <= x6 <= 0.5x0 + 1.25x1 - 8.25
+        */
+        comparePredecessorLayerSymbolicBounds( nlr,
+                                               2,
+                                               Vector<double>( { 0.5, 0, 0, 1 } ),
+                                               Vector<double>( { 0.75, 0, 0, 1 } ),
+                                               Vector<double>( { 0, 0 } ),
+                                               Vector<double>( { 3, 0 } ) );
+
+        compareOutputLayerSymbolicBounds( nlr,
+                                          3,
+                                          Vector<double>( { 1 } ),
+                                          Vector<double>( { 1 } ),
+                                          Vector<double>( { 0 } ),
+                                          Vector<double>( { 0 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          2,
+                                          Vector<double>( { 1, -1 } ),
+                                          Vector<double>( { 1, -1 } ),
+                                          Vector<double>( { 0 } ),
+                                          Vector<double>( { 0 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          1,
+                                          Vector<double>( { 0.5, -1 } ),
+                                          Vector<double>( { 0.75, -1 } ),
+                                          Vector<double>( { 0 } ),
+                                          Vector<double>( { 3 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          0,
+                                          Vector<double>( { 0, 0.5 } ),
+                                          Vector<double>( { 0.5, 1.25 } ),
+                                          Vector<double>( { -7.5 } ),
+                                          Vector<double>( { -8.25 } ) );
+    }
+
+    void test_parameterised_symbolic_bound_maps_relus_active_and_externally_fixed()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTRelu( nlr, tableau );
+
+        tableau.setLowerBound( 0, 4 );
+        tableau.setUpperBound( 0, 6 );
+        tableau.setLowerBound( 1, 1 );
+        tableau.setUpperBound( 1, 5 );
+
+        // Strong negative bias for x2, which is node (1,0). Should make the node unfixed.
+        nlr.setBias( 1, 0, -15 );
+
+        // However, one of the ReLU's variables has been eliminated
+        nlr.eliminateVariable( 2, -3 );
+
+        unsigned paramCount = nlr.getNumberOfParameters();
+        Vector<double> coeffs( paramCount, 0.5 );
+
+        // Invoke parameterised initializeSymbolicBoundsMaps
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) );
+
+        /*
+          Input ranges:
+
+          x0: [4, 6]
+          x1: [1, 5]
+
+          Layers 1, 2:
+
+          x2 = -3
+          x2 is eliminated, everything set to -3
+
+          x3 = x0 + x1
+          x3.lb = x0 + x1 : [5, 11]
+          x3.ub = x0 + x1 : [5, 11]
+
+          First ReLU is inactive (set externally), bounds get zeroed
+          Second ReLU is active, bounds survive the activation
+
+          0 <= x4 <= 0
+          x4.lb = 0
+          x4.ub = 0
+
+          x3 <= x5 <= x3
+          x5.lb = x0 + x1 : [5, 11]
+          x5.ub = x0 + x1 : [5, 11]
+
+          Layer 3:
+
+          x6 = x4 - x5
+          ==> -x3 <= x6 <= -x3
+          x6.lb = - x0 - x1 : [-11, -5]
+          x6.ub = - x0 - x1 : [-11, -5]
+        */
+
+        List<Tightening> expectedBounds( {
+            // x2 does not appear, because it has been eliminated
+
+            Tightening( 3, 5, Tightening::LB ),
+            Tightening( 3, 11, Tightening::UB ),
+
+            Tightening( 4, 0, Tightening::LB ),
+            Tightening( 4, 0, Tightening::UB ),
+            Tightening( 5, 5, Tightening::LB ),
+            Tightening( 5, 11, Tightening::UB ),
+
+            Tightening( 6, -11, Tightening::LB ),
+            Tightening( 6, -5, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+        /*
+          Symbolic bounds of every activation layer in terms of predecessor:
+
+          Layer 2 (RELU):
+          0 <= x4 <= 0
+          x3 <= x5 <= x3
+
+          Symbolic bounds of output layer in terms of every layer (backsubstitution):
+
+          Layer 3:
+          x6 <= x6 <= x6
+
+          Layer 2:
+          Using x6 = x4 - x5:
+          x4 - x5 <= x6 <= x4 - x5
+
+          Layer 1:
+          Using 0 <= x4 <= 0, x3 <= x5 <= x3:
+          -x3 <= x6 <= -x3
+
+          Layer 0:
+          Using x3 = x0 + x1:
+          -x0 - x1 <= x6 <= -x0 - x1
+        */
+        comparePredecessorLayerSymbolicBounds( nlr,
+                                               2,
+                                               Vector<double>( { 0, 0, 0, 1 } ),
+                                               Vector<double>( { 0, 0, 0, 1 } ),
+                                               Vector<double>( { 0, 0 } ),
+                                               Vector<double>( { 0, 0 } ) );
+
+        compareOutputLayerSymbolicBounds( nlr,
+                                          3,
+                                          Vector<double>( { 1 } ),
+                                          Vector<double>( { 1 } ),
+                                          Vector<double>( { 0 } ),
+                                          Vector<double>( { 0 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          2,
+                                          Vector<double>( { 1, -1 } ),
+                                          Vector<double>( { 1, -1 } ),
+                                          Vector<double>( { 0 } ),
+                                          Vector<double>( { 0 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          1,
+                                          Vector<double>( { 0, -1 } ),
+                                          Vector<double>( { 0, -1 } ),
+                                          Vector<double>( { 0 } ),
+                                          Vector<double>( { 0 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          0,
+                                          Vector<double>( { -1, -1 } ),
+                                          Vector<double>( { -1, -1 } ),
+                                          Vector<double>( { 0 } ),
+                                          Vector<double>( { 0 } ) );
+    }
+
+    void test_parameterised_symbolic_bound_maps_relu_residual1()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTReluResidual1( nlr, tableau );
+
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 1 );
+
+        unsigned paramCount = nlr.getNumberOfParameters();
+        Vector<double> coeffs( paramCount, 0.5 );
+
+        // Invoke parameterised initializeSymbolicBoundsMaps
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) );
+
+        /*
+          Input ranges:
+
+          x0: [-1, 1]
+
+          Layers 1, 2:
+
+          x1 = x0
+          x1.lb = x0 : [-1, 1]
+          x1.ub = x0 : [-1, 1]
+
+          ReLU is undecided, bound is concretized. Using custom ReLU lower
+          coefficient of 0.5. Upper coefficient: 1/( 1--1 ) = 1/2 = 0.5
+
+          0.5 x1 <= x2 <= 0.5x1 + 0.5
+          x2.lb = 0.5 x0
+          x2.ub = 0.5 x0 + 0.5
+          x2 range: [-0.5, 1]
+
+          Layers 3, 4 (with residual from x0):
+
+          x3 = - x2 - x0 + 1
+          x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5 x0 + 0.5 : [-1, 2]
+          x3.ub = -1( 0.5 x0 ) -1x0 + 1 = -1.5 x0 + 1 : [-0.5, 2.5]
+          x3 range: [-1, 2.5]
+
+          ReLU is undecided, bound is concretized. Using custom ReLU lower
+          coefficient of 0.5. Upper coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7.
+
+          0.5 x3 <= x4 <= 5/7 x3 + 5/7
+          x4.lb = 0.5 ( -1.5 x0 + 0.5 ) = -0.75 x0 + 0.25 : [-0.5, 1]
+          x4.ub = 5/7 ( -1.5 x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5]
+          x4 range: [-0.5, 2.5]
+
+          Layer 5 (with residual from x1):
+
+          x5 = 3x4 + 3x1 + 1
+          x5.lb = 3 ( -0.75 x0 + 0.25 ) + 3 ( x0 ) + 1 = 0.75x0 + 1.75 : [1, 2.5]
+          x5.ub = 3 ( -15/14 x0 + 20/14 ) + 3 ( x0 ) + 1 = -3/14 x0 + 74/14 : [71/14, 77/14 = 5.5]
+          x5 range: [1, 5.5]
+        */
+
+        List<Tightening> expectedBounds( {
+            Tightening( 1, -1, Tightening::LB ),
+            Tightening( 1, 1, Tightening::UB ),
+            Tightening( 2, -0.5, Tightening::LB ),
+            Tightening( 2, 1, Tightening::UB ),
+            Tightening( 3, -1, Tightening::LB ),
+            Tightening( 3, 2.5, Tightening::UB ),
+            Tightening( 4, -0.5, Tightening::LB ),
+            Tightening( 4, 2.5, Tightening::UB ),
+            Tightening( 5, 1, Tightening::LB ),
+            Tightening( 5, 5.5, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+        /*
+          Symbolic bounds of every activation layer in terms of predecessor:
+
+          Layer 2 (RELU):
+          0.5 x1 <= x2 <= 0.5x1 + 0.5
+
+          Layer 4 (RELU):
+          0.5 x3 <= x4 <= 5/7 x3 + 5/7
+
+          Symbolic bounds of output layer in terms of every layer (backsubstitution):
+
+          Layer 5:
+          x5 <= x5 <= x5
+
+          Layer 4:
+          Using x5 = 3x4 + 3x1 + 1:
+          3x4 + 3x1 + 1 <= x5 <= 3x4 + 3x1 + 1
+          Concretizing residual using x1 : [-1, 1]: 3x4 - 2 <= x5 <= 3x4 + 4
+
+          Layer 3:
+          Using 0.5 x3 <= x4 <= 5/7 x3 + 5/7:
+          1.5 x3 + 3x1 + 1 <= x5 <= 15/7 x3 + 3x1 + 22/7
+          Concretizing residual using x1 : [-1, 1]: 1.5 x3 - 2 <= x5 <= 15/7 x3 + 43/7
+
+          Layer 2:
+          Using x3 = -x2 - x0 + 1:
+          -1.5 x2 + 3x1 - 1.5 x0 + 2.5 <= x5 <= -15/7 x2 + 3x1 - 15/7 x0 + 37/7
+          Concretizing residual using x0 : [-1, 1], x1 : [-1, 1]: -1.5 x2 - 2 <= x5 <=
+          -15/7 x2 + 73/7
+
+          Layer 1:
+          Using 0.5 x1 <= x2 <= 0.5x1 + 0.5:
+          2.25 x1 - 1.5 x0 + 1.75 <= x5 <= 27/14 x1 - 15/7 x0 + 37/7
+          Concretizing residual using x0 : [-1, 1]: 2.25x1 + 0.25 <= x5 <= 27/14 x1 + 52/7
+
+          Layer 0:
+          Using x1 = x0:
+          0.75 x0 + 1.75 <= x5 <= -3/14 x0 + 37/7
+        */
+        comparePredecessorLayerSymbolicBounds( nlr,
+                                               2,
+                                               Vector<double>( { 0.5 } ),
+                                               Vector<double>( { 0.5 } ),
+                                               Vector<double>( { 0 } ),
+                                               Vector<double>( { 0.5 } ) );
+        comparePredecessorLayerSymbolicBounds( nlr,
+                                               4,
+                                               Vector<double>( { 0.5 } ),
+                                               Vector<double>( { 0.7143 } ),
+                                               Vector<double>( { 0 } ),
+                                               Vector<double>( { 0.7143 } ) );
+
+        compareOutputLayerSymbolicBounds( nlr,
+                                          5,
+                                          Vector<double>( { 1 } ),
+                                          Vector<double>( { 1 } ),
+                                          Vector<double>( { 0 } ),
+                                          Vector<double>( { 0 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          4,
+                                          Vector<double>( { 3 } ),
+                                          Vector<double>( { 3 } ),
+                                          Vector<double>( { -2 } ),
+                                          Vector<double>( { 4 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          3,
+                                          Vector<double>( { 1.5 } ),
+                                          Vector<double>( { 2.1429 } ),
+                                          Vector<double>( { -2 } ),
+                                          Vector<double>( { 6.1429 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          2,
+                                          Vector<double>( { -1.5 } ),
+                                          Vector<double>( { -2.1429 } ),
+                                          Vector<double>( { -2 } ),
+                                          Vector<double>( { 10.4286 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          1,
+                                          Vector<double>( { 2.25 } ),
+                                          Vector<double>( { 1.9286 } ),
+                                          Vector<double>( { 0.25 } ),
+                                          Vector<double>( { 7.4286 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          0,
+                                          Vector<double>( { 0.75 } ),
+                                          Vector<double>( { -0.2143 } ),
+                                          Vector<double>( { 1.75 } ),
+                                          Vector<double>( { 5.2857 } ) );
+    }
+
+    void test_parameterised_symbolic_bound_maps_relu_residual2()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTReluResidual2( nlr, tableau );
+
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 1 );
+
+        unsigned paramCount = nlr.getNumberOfParameters();
+        Vector<double> coeffs( paramCount, 0.5 );
+
+        // Invoke parameterised initializeSymbolicBoundsMaps
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) );
+
+        /*
+          Input ranges:
+
+          x0: [-1, 1]
+
+          Layers 1, 2:
+
+          x1 = x0
+          x1.lb = x0 : [-1, 1]
+          x1.ub = x0 : [-1, 1]
+
+          ReLU is undecided, bound is concretized. Using custom ReLU lower
+          coefficient of 0.5. Upper coefficient: 1/( 1--1 ) = 1/2 = 0.5
+
+          0.5 x1 <= x2 <= 0.5x1 + 0.5
+          x2.lb = 0.5x0
+          x2.ub = 0.5x0 + 0.5
+          x2 range: [-0.5, 1]
+
+          Layers 3, 4 (with residual from x0):
+
+          x3 = - x2 - x0 + 1
+          x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 2]
+          x3.ub = -1( 0.5 x0 ) -1x0 + 1 = -1.5 x0 + 1 : [-0.5, 2.5]
+          x3 range: [-1, 2.5]
+
+          ReLU is undecided, bound is concretized. Using custom ReLU lower
+          coefficient of 0.5. Upper coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7.
+
+          0.5 x3 <= x4 <= 5/7 x3 + 5/7
+          x4.lb = 0.5 ( -1.5 x0 + 0.5 ) = -0.75 x0 + 0.25 : [-0.5, 1]
+          x4.ub = 5/7 ( -1.5 x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5]
+          x4 range: [-0.5, 2.5]
+
+          Layer 5 (with residual from x0):
+
+          x5 = 3x4 + x0 + 1
+          x5.lb = 3 ( -0.75 x0 + 0.25 ) + ( x0 ) + 1 = -1.25x0 + 1.75 : [0.5, 3]
+          x5.ub = 3 ( -15/14 x0 + 20/14 ) + ( x0 ) + 1 = -31/14 x0 + 74/14 : [43/14, 105/14 = 7.5]
+          x5 range: [0.5, 7.5]
+
+          Layer 6:
+          x6 = x5
+          x6.lb = -1.25x0 + 1.75 : [0.5, 3]
+          x6.ub = -31/14 x0 + 74/14 : [43/14, 7.5]
+          x6 range: [0.5, 7.5]
+        */
+
+        List<Tightening> expectedBounds( {
+            Tightening( 1, -1, Tightening::LB ),
+            Tightening( 1, 1, Tightening::UB ),
+            Tightening( 2, -0.5, Tightening::LB ),
+            Tightening( 2, 1, Tightening::UB ),
+            Tightening( 3, -1, Tightening::LB ),
+            Tightening( 3, 2.5, Tightening::UB ),
+            Tightening( 4, -0.5, Tightening::LB ),
+            Tightening( 4, 2.5, Tightening::UB ),
+            Tightening( 5, 0.5, Tightening::LB ),
+            Tightening( 5, 7.5, Tightening::UB ),
+            Tightening( 6, 0.5, Tightening::LB ),
+            Tightening( 6, 7.5, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+        /*
+          Symbolic bounds of every activation layer in terms of predecessor:
+
+          Layer 2 (RELU):
+          0.5 x1 <= x2 <= 0.5x1 + 0.5
+
+          Layer 4 (RELU):
+          0.5 x3 <= x4 <= 5/7 x3 + 5/7
+
+          Symbolic bounds of output layer in terms of every layer (backsubstitution):
+
+          Layer 6:
+          x6 <= x6 <= x6
+
+          Layer 5:
+          Using x6 = x5:
+          x5 <= x6 <= x5
+
+          Layer 4:
+          Using x5 = 3x4 + x0 + 1:
+          3x4 + x0 + 1 <= x6 <= 3x4 + x0 + 1
+          Concretizing residual using x0 : [-1, 1]: 3x4 <= x6 <= 3x4 + 2
+
+          Layer 3:
+          Using 0.5 x3 <= x4 <= 5/7 x3 + 5/7:
+          1.5 x3 + x0 + 1 <= x6 <= 15/7 x3 + x0 + 22/7
+          Concretizing residual using x0 : [-1, 1]: 1.5 x3 <=
x6 <= 15/7 x3 + 29/7 + + Layer 2: + Using x3 = -x2 - x0 + 1: + -1.5 x2 - 0.5 x0 + 2.5 <= x6 <= -15/7 x2 - 8/7 x0 + 37/7 + Concretizing residual using x0 : [-1, 1]: -1.5 x2 + 2 <= x6 <= -15/7 x2 + 45/7 + + Layer 1: + Using 0.5 x1 <= x2 <= 0.5x1 + 0.5: + -0.75x1 - 0.5 x0 + 1.75 <= x6 <= -15/14 x1 - 8/7 x0 + 37/7 + Concretizing residual using x0 : [-1, 1]: -0.75x1 + 1.25 <= x6 <= -15/14 x1 + 45/7 + + Layer 0: + Using x1 = x0: + -1.25 x0 + 1.75 <= x6 <= -31/14 x0 + 37/7 + */ + comparePredecessorLayerSymbolicBounds( nlr, + 2, + Vector( { 0.5 } ), + Vector( { 0.5 } ), + Vector( { 0 } ), + Vector( { 0.5 } ) ); + comparePredecessorLayerSymbolicBounds( nlr, + 4, + Vector( { 0.5 } ), + Vector( { 0.7143 } ), + Vector( { 0 } ), + Vector( { 0.7143 } ) ); + + compareOutputLayerSymbolicBounds( nlr, + 6, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 5, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 4, + Vector( { 3 } ), + Vector( { 3 } ), + Vector( { 0 } ), + Vector( { 2 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 3, + Vector( { 1.5 } ), + Vector( { 2.1429 } ), + Vector( { 0 } ), + Vector( { 4.1429 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 2, + Vector( { -1.5 } ), + Vector( { -2.1429 } ), + Vector( { 2 } ), + Vector( { 6.4286 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 1, + Vector( { -0.75 } ), + Vector( { -1.0714 } ), + Vector( { 1.25 } ), + Vector( { 6.4286 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 0, + Vector( { -1.25 } ), + Vector( { -2.2143 } ), + Vector( { 1.75 } ), + Vector( { 5.2857 } ) ); + } + + void test_parameterised_symbolic_bound_maps_relu_reindex() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTReluReindex( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised initializeSymbolicBoundsMaps + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) ); + + /* + Input ranges: + + x0: [-1, 1] + x1: [-1, 1] + + Layers 1, 2: + + x2 = x0 + x1 + x2.lb = x0 + x1 : [-2, 2] + x2.ub = x0 + x1 : [-2, 2] + + x3 = x0 - x1 + x3.lb = x0 - x1 : [-2, 2] + x3.ub = x0 - x1 : [-2, 2] + + Both ReLUs are undecided, bounds are concretized. Using custom ReLU lower + coefficient of 0.5. Upper coefficient: 2/( 2--2 ) = 2/4 = 0.5 + + 0.5 x2 <= x4 <= 0.5x2 + 1 + x4.lb = 0.5 ( x0 + x1 ) = 0.5x0 + 0.5x1 + x4.ub = 0.5 ( x0 + x1 ) + 1 = 0.5x0 + 0.5x1 + 1 + x4 range: [-1, 2] + + 0.5 x3 <= x5 <= 0.5x3 + 1 + x5.lb = 0.5 ( x0 - x1 ) = 0.5x0 - 0.5x1 + x5.ub = 0.5 ( x0 - x1 ) + 1 = 0.5x0 - 0.5x1 + 1 + x5 range: [-1, 2] + + Layers 3, 4: + + x6 = x4 + x5 + x6.lb = 1 ( 0.5x0 + 0.5x1 ) + 1 ( 0.5x0 - 0.5x1 ) = x0 : [-1, 1] + x6.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) + 1 ( 0.5x0 - 0.5x1 + 1 ) = x0 + 2 : [1, 3] + x6 range: [-1, 3] + + x7 = x4 - x5 + x7.lb = 1 ( 0.5x0 + 0.5x1 ) - 1 ( 0.5x0 - 0.5x1 + 1 ) = x1 - 1 : [-2, 0] + x7.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) - 1 ( 0.5x0 - 0.5x1 ) = x1 + 1 : [0, 2] + x7 range: [-2, 2] + + Both ReLUs are undecided, bounds are concretized. Using custom ReLU lower + coefficient of 0.5. 
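+
+          As a sanity sketch of the relaxation used here (plain Python, mirroring
+          the arithmetic in these comments; the helper name is ours, not part of
+          the Marabou API), an undecided ReLU with bounds [lb, ub] gets the lower
+          line alpha * x and the upper line the chord from ( lb, 0 ) to ( ub, ub ):
+
+          def relu_relaxation(lb, ub, alpha):
+              # (lower coeff, upper coeff, upper bias)
+              return alpha, ub / (ub - lb), -ub * lb / (ub - lb)
+
+          relu_relaxation(-1, 3, 0.5)  # (0.5, 0.75, 0.75) -- first ReLU below
+          relu_relaxation(-2, 2, 0.5)  # (0.5, 0.5, 1.0)   -- second ReLU below
+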
+          Upper coefficient (first ReLU): 3/( 3--1 ) = 3/4 = 0.75
+          Upper coefficient (second ReLU): 2/( 2--2 ) = 2/4 = 0.5
+
+          0.5 x6 <= x8 <= 0.75 x6 + 0.75
+          x8.lb = 0.5 ( x0 ) = 0.5 x0
+          x8.ub = 0.75 ( x0 + 2 ) + 0.75 = 0.75 x0 + 2.25
+          x8 range: [-0.5, 3]
+
+          0.5 x7 <= x9 <= 0.5 x7 + 1
+          x9.lb = 0.5 ( x1 - 1 ) = 0.5 x1 - 0.5
+          x9.ub = 0.5 ( x1 + 1 ) + 1 = 0.5x1 + 1.5
+          x9 range: [-1, 2]
+
+          Layer 5:
+          x10 = x8 + x9 + 1
+          x10.lb = 1 ( 0.5 x6 ) + 1 ( 0.5 x7 ) + 1 = ( 0.5 x4 + 0.5x5 ) + 1 ( 0.5 x4 - 0.5x5 ) + 1
+          = x4 + 1 >= 0.5 x2 + 1 = 0.5 x0 + 0.5x1 + 1 : [0, 2]
+          x10.ub = 1 ( 0.75 x6 + 0.75 ) + 1 ( 0.5 x7 + 1 ) + 1
+          = ( 0.75 x4 + 0.75 x5 + 0.75 ) + 1 ( 0.5 x4 - 0.5x5 + 1 ) + 1
+          = 1.25 x4 + 0.25 x5 + 2.75 <= 0.625 x2 + 0.125 x3 + 4.25
+          = 0.75 x0 + 0.5 x1 + 4.25 : [2.5, 5.5]
+          x10 range: [0, 5.5]
+
+          x11 = x9
+          x11.lb = 0.5 x1 - 0.5 : [-1, 0]
+          x11.ub = 0.5x1 + 1.5 : [1, 2]
+          x11 range: [-1, 2]
+        */
+
+        List<Tightening> expectedBounds(
+            { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+              Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+
+              Tightening( 4, -1, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
+              Tightening( 5, -1, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+
+              Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ),
+              Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
+
+              Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
+              Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2, Tightening::UB ),
+
+              Tightening( 10, 0, Tightening::LB ), Tightening( 10, 5.5, Tightening::UB ),
+              Tightening( 11, -1, Tightening::LB ), Tightening( 11, 2, Tightening::UB )
+
+            } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+        /*
+          Symbolic bounds of every activation layer in terms of predecessor:
+
+          Layer 2 (RELU):
+          0.5 x2 <= x4 <= 0.5x2 + 1
+          0.5 x3 <= x5 <= 0.5x3 + 1
+
+          Layer 4 (RELU):
+          0.5 x6 <= x8 <= 0.75 x6 + 0.75
+          0.5 x7 <= x9 <= 0.5 x7 + 1
+
+          Symbolic bounds of output layer in terms of every layer (backsubstitution):
+
+          Layer 5:
+          x10 <= x10 <= x10
+          x11 <= x11 <= x11
+
+          Layer 4:
+          Using x10 = x8 + x9 + 1, x11 = x9:
+          x8 + x9 + 1 <= x10 <= x8 + x9 + 1
+          x9 <= x11 <= x9
+
+          Layer 3:
+          Using 0.5 x6 <= x8 <= 0.75 x6 + 0.75, 0.5 x7 <= x9 <= 0.5 x7 + 1:
+          0.5 x6 + 0.5 x7 + 1 <= x10 <= 0.75 x6 + 0.5 x7 + 2.75
+          0.5 x7 <= x11 <= 0.5 x7 + 1
+
+          Layer 2:
+          Using x6 = x4 + x5, x7 = x4 - x5:
+          x4 + 1 <= x10 <= 1.25 x4 + 0.25 x5 + 2.75
+          0.5 x4 - 0.5 x5 <= x11 <= 0.5 x4 - 0.5 x5 + 1
+
+          Layer 1:
+          Using 0.5 x2 <= x4 <= 0.5x2 + 1, 0.5 x3 <= x5 <= 0.5x3 + 1:
+          0.5 x2 + 1 <= x10 <= 0.625 x2 + 0.125 x3 + 4.25
+          0.25 x2 - 0.25 x3 - 0.5 <= x11 <= 0.25 x2 - 0.25 x3 + 1.5
+
+          Layer 0:
+          Using x2 = x0 + x1, x3 = x0 - x1:
+          0.5 x0 + 0.5 x1 + 1 <= x10 <= 0.75 x0 + 0.5 x1 + 4.25
+          0.5 x1 - 0.5 <= x11 <= 0.5 x1 + 1.5
+        */
+        comparePredecessorLayerSymbolicBounds( nlr,
+                                               2,
+                                               Vector<double>( { 0, 0.5, 0.5, 0 } ),
+                                               Vector<double>( { 0, 0.5, 0.5, 0 } ),
+                                               Vector<double>( { 0, 0 } ),
+                                               Vector<double>( { 1, 1 } ) );
+
+        comparePredecessorLayerSymbolicBounds( nlr,
+                                               4,
+                                               Vector<double>( { 0, 0.5, 0.5, 0 } ),
+                                               Vector<double>( { 0, 0.75, 0.5, 0 } ),
+                                               Vector<double>( { 0, 0 } ),
+                                               Vector<double>( { 1, 0.75 } ) );
+
+        compareOutputLayerSymbolicBounds( nlr,
+                                          5,
+                                          Vector<double>( { 1, 0, 0, 1 } ),
+                                          Vector<double>( { 1, 0, 0, 1 } ),
+                                          Vector<double>( { 0, 0 } ),
+                                          Vector<double>( { 0, 0 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          4,
+                                          Vector<double>( { 1, 1, 1, 0 } ),
+                                          Vector<double>( { 1, 1, 1, 0 } ),
+                                          Vector<double>( { 1, 0 } ),
+                                          Vector<double>( { 1, 0 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          3,
+                                          Vector<double>( { 0.5, 0, 0.5, 0.5 } ),
+                                          Vector<double>( { 0.75, 0, 0.5, 0.5 } ),
+                                          Vector<double>( { 1, 0 } ),
+                                          Vector<double>( { 2.75, 1 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          2,
+                                          Vector<double>( { 0, -0.5, 1, 0.5 } ),
+                                          Vector<double>( { 0.25, -0.5, 1.25, 0.5 } ),
+                                          Vector<double>( { 1, 0 } ),
+                                          Vector<double>( { 2.75, 1 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          1,
+                                          Vector<double>( { 0.5, 0.25, 0, -0.25 } ),
+                                          Vector<double>( { 0.625, 0.25, 0.125, -0.25 } ),
+                                          Vector<double>( { 1, -0.5 } ),
+                                          Vector<double>( { 4.25, 1.5 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          0,
+                                          Vector<double>( { 0.5, 0, 0.5, 0.5 } ),
+                                          Vector<double>( { 0.75, 0, 0.5, 0.5 } ),
+                                          Vector<double>( { 1, -0.5 } ),
+                                          Vector<double>( { 4.25, 1.5 } ) );
+    }
+
+    void test_parameterised_symbolic_bound_maps_abs_all_positive()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        tableau.getBoundManager().initialize( 7 );
+        nlr.setTableau( &tableau );
+
+        // Create the layers
+        nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
+        nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 );
+        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );
+
+        // Mark layer dependencies
+        for ( unsigned i = 1; i <= 3; ++i )
+            nlr.addLayerDependency( i - 1, i );
+
+        // Weights
+        nlr.setWeight( 0, 0, 1, 0, 2 );
+        nlr.setWeight( 0, 0, 1, 1, 1 );
+        nlr.setWeight( 0, 1, 1, 0, 3 );
+        nlr.setWeight( 0, 1, 1, 1, 1 );
+        nlr.setWeight( 2, 0, 3, 0, 1 );
+        nlr.setWeight( 2, 1, 3, 0, -1 );
+
+        // Mark the Abs sources
+        nlr.addActivationSource( 1, 0, 2, 0 );
+        nlr.addActivationSource( 1, 1, 2, 1 );
+
+        // Variable indexing
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
+
+        // Very loose bounds for neurons except inputs
+        double large = 1000000;
+
+        tableau.setLowerBound( 2, -large );
+        tableau.setUpperBound( 2, large );
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+
+        tableau.setLowerBound( 0, 4 );
+        tableau.setUpperBound( 0, 6 );
+        tableau.setLowerBound( 1, 1 );
+        tableau.setUpperBound( 1, 5 );
+
+        unsigned paramCount = nlr.getNumberOfParameters();
+        Vector<double> coeffs( paramCount, 0.5 );
+
+        // Invoke parameterised initializeSymbolicBoundsMaps
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) );
+
+        /*
+          Input ranges:
+
+          x0: [4, 6]
+          x1: [1, 5]
+
+          Layers 1, 2:
+
+          x2 = 2x0 + 3x1
+          x2.lb = 2x0 + 3x1 : [11, 27]
+          x2.ub = 2x0 + 3x1 : [11, 27]
+
+          x3 = x0 + x1
+          x3.lb = x0 + x1 : [5, 11]
+          x3.ub = x0 + x1 : [5, 11]
+
+          Both absolute values are positive, bounds survive the activations:
+
+          x2 <= x4 <= x2
+          x4.lb = 2x0 + 3x1 : [11, 27]
+          x4.ub = 2x0 + 3x1 : [11, 27]
+
+          x3 <= x5 <= x3
+          x5.lb = x0 + x1 : [5, 11]
+          x5.ub = x0 + x1 : [5, 11]
+
+          Layer 3:
+          x6 = x4 - x5
+          => x2 - x3 <= x6 <= x2 - x3
+          x6.lb = x0 + 2x1 : [6, 16]
+          x6.ub = x0 + 2x1 : [6, 16]
+        */
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, 11, Tightening::LB ),
+            Tightening( 2, 27, Tightening::UB ),
+            Tightening( 3, 5, Tightening::LB ),
+            Tightening( 3, 11, Tightening::UB ),
+
+            Tightening( 4, 11, Tightening::LB ),
+            Tightening( 4, 27, Tightening::UB ),
+            Tightening( 5, 5, Tightening::LB ),
+            Tightening( 5, 11, Tightening::UB ),
+
+            Tightening( 6, 6, Tightening::LB ),
+            Tightening( 6, 16, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+        /*
+          Symbolic bounds of every activation layer in terms of predecessor:
+
+          Layer 2 (ABSOLUTE_VALUE):
+          x2 <= x4 <= x2
+          x3 <= x5 <= x3
+
+          Symbolic bounds of output layer in terms of every layer (backsubstitution):
+
+          Layer 3:
+          x6 <= x6 <= x6
+
+          Layer 2:
+          Using x6 = x4 - x5:
+          x4 - x5 <= x6 <= x4 - x5
+
+          Layer 1:
+          Using x2 <= x4 <= x2, x3 <= x5 <= x3:
+          x2 - x3 <= x6 <= x2 - x3
+
+          Layer 0:
+          Using x2 = 2x0 + 3x1, x3 = x0 + x1:
+          x0 + 2x1 <= x6 <= x0 + 2x1
+        */
+        comparePredecessorLayerSymbolicBounds( nlr,
+                                               2,
+                                               Vector<double>( { 1, 0, 0, 1 } ),
+                                               Vector<double>( { 1, 0, 0, 1 } ),
+                                               Vector<double>( { 0, 0 } ),
+                                               Vector<double>( { 0, 0 } ) );
+
+        compareOutputLayerSymbolicBounds( nlr,
+                                          3,
+                                          Vector<double>( { 1 } ),
+                                          Vector<double>( { 1 } ),
+                                          Vector<double>( { 0 } ),
+                                          Vector<double>( { 0 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          2,
+                                          Vector<double>( { 1, -1 } ),
+                                          Vector<double>( { 1, -1 } ),
+                                          Vector<double>( { 0 } ),
+                                          Vector<double>( { 0 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          1,
+                                          Vector<double>( { 1, -1 } ),
+                                          Vector<double>( { 1, -1 } ),
+                                          Vector<double>( { 0 } ),
+                                          Vector<double>( { 0 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          0,
+                                          Vector<double>( { 1, 2 } ),
+                                          Vector<double>( { 1, 2 } ),
+                                          Vector<double>( { 0 } ),
+                                          Vector<double>( { 0 } ) );
+    }
+
+    void test_parameterised_symbolic_bound_maps_abs_positive_and_negative()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        tableau.getBoundManager().initialize( 7 );
+        nlr.setTableau( &tableau );
+
+        // Create the layers
+        nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
+        nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 );
+        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );
+
+        // Mark layer dependencies
+        for ( unsigned i = 1; i <= 3; ++i )
+            nlr.addLayerDependency( i - 1, i );
+
+        // Weights
+        nlr.setWeight( 0, 0, 1, 0, 2 );
+        nlr.setWeight( 0, 0, 1, 1, 1 );
+        nlr.setWeight( 0, 1, 1, 0, 3 );
+        nlr.setWeight( 0, 1, 1, 1, 1 );
+        nlr.setWeight( 2, 0, 3, 0, 1 );
+        nlr.setWeight( 2, 1, 3, 0, -1 );
+
+        // Mark the Abs sources
+        nlr.addActivationSource( 1, 0, 2, 0 );
+        nlr.addActivationSource( 1, 1, 2, 1 );
+
+        // Variable indexing
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
+
+        // Very loose bounds for neurons except inputs
+        double large = 1000000;
+
+        tableau.setLowerBound( 2, -large );
+        tableau.setUpperBound( 2, large );
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+
+        tableau.setLowerBound( 0, 4 );
+        tableau.setUpperBound( 0, 6 );
+        tableau.setLowerBound( 1, 1 );
+        tableau.setUpperBound( 1, 5 );
+
+        // Strong negative bias for x2, which is node (1,0)
+        nlr.setBias( 1, 0, -30 );
+
+        unsigned paramCount = nlr.getNumberOfParameters();
+        Vector<double> coeffs( paramCount, 0.5 );
+
+        // Invoke parameterised initializeSymbolicBoundsMaps
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) );
+
+        /*
+          Input ranges:
+
+          x0: [4, 6]
+          x1: [1, 5]
+
+          Layers 1, 2:
+          x2 = 2x0 + 3x1 - 30
+          x2.lb = 2x0 + 3x1 - 30 : [-19, -3]
+          x2.ub = 2x0 + 3x1 - 30 : [-19, -3]
+
+          x3 = x0 + x1
+          x3.lb = x0 + x1 : [5, 11]
+          x3.ub = x0 + x1 : [5, 11]
+
+          First absolute value is negative, bounds get flipped
+          Second absolute value is positive, bounds survive the activation
+
+          -x2 <= x4 <= -x2
+          x4.lb = -2x0 -3x1 + 30 : [3, 19]
+          x4.ub = -2x0 -3x1 + 30 : [3, 19]
+
+          x3 <= x5 <= x3
+          x5.lb = x0 + x1 : [5, 11]
+          x5.ub = x0 + x1 : [5, 11]
+
+          Layer 3:
+          x6 = x4 - x5
+          => -x2 - x3 <= x6 <= -x2 - x3
+          x6.lb = - 3x0 - 4x1 + 30 : [-8, 14]
+          x6.ub = - 3x0 - 4x1 + 30 : [-8, 14]
+        */
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, -19, Tightening::LB ),
+            Tightening( 2, -3, Tightening::UB ),
+            Tightening( 3, 5, Tightening::LB ),
+            Tightening( 3, 11, Tightening::UB ),
+
+            Tightening( 4, 3, Tightening::LB ),
+            Tightening( 4, 19, Tightening::UB ),
+            Tightening( 5, 5, Tightening::LB ),
+            Tightening( 5, 11, Tightening::UB ),
+
+            Tightening( 6, -8, Tightening::LB ),
+            Tightening( 6, 14, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+        /*
+          Symbolic bounds of every activation layer in terms of predecessor:
+
+          Layer 2 (ABSOLUTE_VALUE):
+          -x2 <= x4 <= -x2
+          x3 <= x5 <= x3
+
+          Symbolic bounds of output layer in terms of every layer (backsubstitution):
+
+          Layer 3:
+          x6 <= x6 <= x6
+
+          Layer 2:
+          Using x6 = x4 - x5:
+          x4 - x5 <= x6 <= x4 - x5
+
+          Layer 1:
+          Using -x2 <= x4 <= -x2, x3 <= x5 <= x3:
+          -x2 - x3 <= x6 <= -x2 - x3
+
+          Layer 0:
+          Using x2 = 2x0 + 3x1 - 30, x3 = x0 + x1:
+          -3x0 - 4x1 + 30 <= x6 <= -3x0 - 4x1 + 30
+        */
+        comparePredecessorLayerSymbolicBounds( nlr,
+                                               2,
+                                               Vector<double>( { -1, 0, 0, 1 } ),
+                                               Vector<double>( { -1, 0, 0, 1 } ),
+                                               Vector<double>( { 0, 0 } ),
+                                               Vector<double>( { 0, 0 } ) );
+
+        compareOutputLayerSymbolicBounds( nlr,
+                                          3,
+                                          Vector<double>( { 1 } ),
+                                          Vector<double>( { 1 } ),
+                                          Vector<double>( { 0 } ),
+                                          Vector<double>( { 0 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          2,
+                                          Vector<double>( { 1, -1 } ),
+                                          Vector<double>( { 1, -1 } ),
+                                          Vector<double>( { 0 } ),
+                                          Vector<double>( { 0 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          1,
+                                          Vector<double>( { -1, -1 } ),
+                                          Vector<double>( { -1, -1 } ),
+                                          Vector<double>( { 0 } ),
+                                          Vector<double>( { 0 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          0,
+                                          Vector<double>( { -3, -4 } ),
+                                          Vector<double>( { -3, -4 } ),
+                                          Vector<double>( { 30 } ),
+                                          Vector<double>( { 30 } ) );
+    }
+
+    void test_parameterised_symbolic_bound_maps_absolute_values_positive_and_not_fixed()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        tableau.getBoundManager().initialize( 7 );
+        nlr.setTableau( &tableau );
+
+        // Create the layers
+        nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
+        nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 );
+        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );
+
+        // Mark layer dependencies
+        for ( unsigned i = 1; i <= 3; ++i )
+            nlr.addLayerDependency( i - 1, i );
+
+        // Weights
+        nlr.setWeight( 0, 0,
1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised initializeSymbolicBoundsMaps + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + x2 = 2x0 + 3x1 - 15 + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First absolute value is undecided, bounds are concretized. 
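+          (Concretely: 0 <= |x2| <= max( |x2.lb|, x2.ub ) = max( 4, 12 ) = 12,
+          so x4 is boxed to the constant interval [0, 12] below.)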
+          Second absolute value is positive, bounds survive the activation
+
+          0 <= x4 <= 12
+          x4 range: [0, 12]
+          x4.lb = 0
+          x4.ub = 12
+
+          x3 <= x5 <= x3
+          x5.lb = x0 + x1 : [5, 11]
+          x5.ub = x0 + x1 : [5, 11]
+
+          Layer 3:
+
+          x6 = x4 - x5
+          => -x3 <= x6 <= -x3 + 12
+          x6.lb = - x0 - x1 : [-11, -5]
+          x6.ub = - x0 - x1 + 12 : [ 1, 7]
+
+          x6 range: [-11, 7]
+        */
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, -4, Tightening::LB ),
+            Tightening( 2, 12, Tightening::UB ),
+            Tightening( 3, 5, Tightening::LB ),
+            Tightening( 3, 11, Tightening::UB ),
+
+            Tightening( 4, 0, Tightening::LB ),
+            Tightening( 4, 12, Tightening::UB ),
+            Tightening( 5, 5, Tightening::LB ),
+            Tightening( 5, 11, Tightening::UB ),
+
+            Tightening( 6, -11, Tightening::LB ),
+            Tightening( 6, 7, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+        /*
+          Symbolic bounds of every activation layer in terms of predecessor:
+
+          Layer 2 (ABSOLUTE_VALUE):
+          0 <= x4 <= 12
+          x3 <= x5 <= x3
+
+          Symbolic bounds of output layer in terms of every layer (backsubstitution):
+
+          Layer 3:
+          x6 <= x6 <= x6
+
+          Layer 2:
+          Using x6 = x4 - x5:
+          x4 - x5 <= x6 <= x4 - x5
+
+          Layer 1:
+          Using 0 <= x4 <= 12, x3 <= x5 <= x3:
+          -x3 <= x6 <= -x3 + 12
+
+          Layer 0:
+          Using x3 = x0 + x1:
+          -x0 - x1 <= x6 <= -x0 - x1 + 12
+        */
+        comparePredecessorLayerSymbolicBounds( nlr,
+                                               2,
+                                               Vector<double>( { 0, 0, 0, 1 } ),
+                                               Vector<double>( { 0, 0, 0, 1 } ),
+                                               Vector<double>( { 0, 0 } ),
+                                               Vector<double>( { 12, 0 } ) );
+
+        compareOutputLayerSymbolicBounds( nlr,
+                                          3,
+                                          Vector<double>( { 1 } ),
+                                          Vector<double>( { 1 } ),
+                                          Vector<double>( { 0 } ),
+                                          Vector<double>( { 0 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          2,
+                                          Vector<double>( { 1, -1 } ),
+                                          Vector<double>( { 1, -1 } ),
+                                          Vector<double>( { 0 } ),
+                                          Vector<double>( { 0 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          1,
+                                          Vector<double>( { 0, -1 } ),
+                                          Vector<double>( { 0, -1 } ),
+                                          Vector<double>( { 0 } ),
+                                          Vector<double>( { 12 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          0,
+                                          Vector<double>( { -1, -1 } ),
+                                          Vector<double>( { -1, -1 } ),
+                                          Vector<double>( { 0 } ),
+                                          Vector<double>( { 12 } ) );
+    }
+
+    void test_parameterised_symbolic_bound_maps_absolute_values_active_and_externally_fixed()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        tableau.getBoundManager().initialize( 7 );
+        nlr.setTableau( &tableau );
+
+        // Create the layers
+        nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
+        nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 );
+        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );
+
+        // Mark layer dependencies
+        for ( unsigned i = 1; i <= 3; ++i )
+            nlr.addLayerDependency( i - 1, i );
+
+        // Weights
+        nlr.setWeight( 0, 0, 1, 0, 2 );
+        nlr.setWeight( 0, 0, 1, 1, 1 );
+        nlr.setWeight( 0, 1, 1, 0, 3 );
+        nlr.setWeight( 0, 1, 1, 1, 1 );
+        nlr.setWeight( 2, 0, 3, 0, 1 );
+        nlr.setWeight( 2, 1, 3, 0, -1 );
+
+        // Mark the Abs sources
+        nlr.addActivationSource( 1, 0, 2, 0 );
+        nlr.addActivationSource( 1, 1, 2, 1 );
+
+        // Variable indexing
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
+
+        // Very loose bounds for neurons except inputs
+        double large = 1000000;
+
+        tableau.setLowerBound( 2, -large );
+        tableau.setUpperBound( 2, large );
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+
+        tableau.setLowerBound( 0, 4 );
+        tableau.setUpperBound( 0, 6 );
+        tableau.setLowerBound( 1, 1 );
+        tableau.setUpperBound( 1, 5 );
+
+        // Strong negative bias for x2, which is node (1,0). Should make the node unfixed.
+        nlr.setBias( 1, 0, -15 );
+
+        // However, the weighted sum variable has been eliminated
+        nlr.eliminateVariable( 2, -3 );
+
+        unsigned paramCount = nlr.getNumberOfParameters();
+        Vector<double> coeffs( paramCount, 0.5 );
+
+        // Invoke parameterised initializeSymbolicBoundsMaps
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) );
+
+        /*
+          Input ranges:
+
+          x0: [4, 6]
+          x1: [1, 5]
+
+          Layers 1, 2:
+
+          x2 = -3
+          x2 is eliminated, everything set to -3
+
+          x3 = x0 + x1
+          x3.lb = x0 + x1 : [5, 11]
+          x3.ub = x0 + x1 : [5, 11]
+
+          First absolute value is negative, bounds get flipped
+          Second absolute value is positive, bounds survive the activation
+
+          -x2 <= x4 <= -x2
+          x4: all set to 3
+
+          x3 <= x5 <= x3
+          x5.lb = x0 + x1 : [5, 11]
+          x5.ub = x0 + x1 : [5, 11]
+
+          Layer 3:
+
+          x6 = x4 - x5
+          => -x2 - x3 <= x6 <= -x2 - x3
+          => -x3 + 3 <= x6 <= -x3 + 3
+          x6.lb = - x0 - x1 + 3 : [-8, -2]
+          x6.ub = - x0 - x1 + 3 : [-8, -2]
+        */
+
+        List<Tightening> expectedBounds( {
+            // x2 does not appear, because it has been eliminated
+
+            Tightening( 3, 5, Tightening::LB ),
+            Tightening( 3, 11, Tightening::UB ),
+
+            Tightening( 4, 3, Tightening::LB ),
+            Tightening( 4, 3, Tightening::UB ),
+            Tightening( 5, 5, Tightening::LB ),
+            Tightening( 5, 11, Tightening::UB ),
+
+            Tightening( 6, -8, Tightening::LB ),
+            Tightening( 6, -2, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+        /*
+          Symbolic bounds of every activation layer in terms of predecessor:
+
+          Layer 2 (ABSOLUTE_VALUE):
+          -x2 <= x4 <= -x2
+          x3 <= x5 <= x3
+
+          Symbolic bounds of output layer in terms of every layer (backsubstitution):
+
+          Layer 3:
+          x6 <= x6 <= x6
+
+          Layer 2:
+          Using x6 = x4 - x5:
+          x4 - x5 <= x6 <= x4 - x5
+
+          Layer 1:
+          Using -x2 <= x4 <= -x2, x3 <= x5 <= x3:
+          -x2 - x3 <= x6 <= -x2 - x3
+          x2 = -3 is eliminated.
+ -x3 + 3 <= x6 <= -x3 + 3 + + Layer 0: + Using x3 = x0 + x1: + - x0 - x1 + 3 <= x6 <= - x0 - x1 + 3 + */ + comparePredecessorLayerSymbolicBounds( nlr, + 2, + Vector( { -1, 0, 0, 1 } ), + Vector( { -1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + compareOutputLayerSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 1, + Vector( { 0, -1 } ), + Vector( { 0, -1 } ), + Vector( { 3 } ), + Vector( { 3 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 0, + Vector( { -1, -1 } ), + Vector( { -1, -1 } ), + Vector( { 3 } ), + Vector( { 3 } ) ); + } + + void test_parameterised_symbolic_bound_maps_signs_positive_and_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised initializeSymbolicBoundsMaps + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = 2x0 + 3x1 - 15 + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First sign is undecided, bounds are concretized. + Second sign is active, bounds become constant 1 + Using custom coefficients with alpha = { 0.5, 0.5 }. + Coefficient (first Sign, lower): 2/12 * 0.5 = 1/12. 
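+          (Equivalently: for an undecided Sign with range [lb, ub], this
+          parameterisation uses the lower line alpha * 2/ub * x - 1 and the upper
+          line alpha * 2/( -lb ) * x + 1; this reading is inferred from the
+          arithmetic here, not from a documented formula.)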
+          Coefficient (first Sign, upper): -2/-4 * 0.5 = 1/4.
+
+          1/12 x2 - 1 <= x4 <= 1/4 x2 + 1
+          x4.lb = 1/12 ( 2x0 + 3x1 - 15 ) - 1 = 2/12 x0 + 3/12 x1 - 27/12
+          x4.ub = 1/4 ( 2x0 + 3x1 - 15 ) + 1 = 0.5 x0 + 0.75x1 - 2.75
+          x4 range: [-1, 1]
+
+          1 <= x5 <= 1
+          x5.lb = 1
+          x5.ub = 1
+          x5 range: [1, 1]
+
+          Layer 3:
+
+          x6 = x4 - x5 : [-2, 0]
+          => 1/12 x2 - 2 <= x6 <= 1/4 x2 : [-7/3, 3]
+          x6.lb = 1 ( 2/12 x0 + 3/12 x1 - 27/12 ) - 1 ( 1 ) = 2/12 x0 + 3/12 x1 - 39/12 :
+          [-28/12 = -7/3, -1]
+          x6.ub = 1 ( 0.5 x0 + 0.75x1 - 2.75 ) - 1 ( 1 ) = 0.5 x0 + 0.75x1 - 3.75 : [-1, 3]
+
+          x6 range: [-2, 0]
+        */
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, -4, Tightening::LB ),
+            Tightening( 2, 12, Tightening::UB ),
+            Tightening( 3, 5, Tightening::LB ),
+            Tightening( 3, 11, Tightening::UB ),
+
+            Tightening( 4, -1, Tightening::LB ),
+            Tightening( 4, 1, Tightening::UB ),
+            Tightening( 5, 1, Tightening::LB ),
+            Tightening( 5, 1, Tightening::UB ),
+
+            Tightening( 6, -2, Tightening::LB ),
+            Tightening( 6, 0, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+        /*
+          Symbolic bounds of every activation layer in terms of predecessor:
+
+          Layer 2 (SIGN):
+          1/12 x2 - 1 <= x4 <= 1/4 x2 + 1
+          1 <= x5 <= 1
+
+          Symbolic bounds of output layer in terms of every layer (backsubstitution):
+
+          Layer 3:
+          x6 <= x6 <= x6
+
+          Layer 2:
+          Using x6 = x4 - x5:
+          x4 - x5 <= x6 <= x4 - x5
+
+          Layer 1:
+          Using 1/12 x2 - 1 <= x4 <= 1/4 x2 + 1, 1 <= x5 <= 1:
+          1/12 x2 - 2 <= x6 <= 1/4 x2
+
+          Layer 0:
+          Using x2 = 2x0 + 3x1 - 15:
+          1/6 x0 + 1/4 x1 - 3.25 <= x6 <= 0.5 x0 + 0.75x1 - 3.75
+        */
+        comparePredecessorLayerSymbolicBounds( nlr,
+                                               2,
+                                               Vector<double>( { 0.0833, 0, 0, 0 } ),
+                                               Vector<double>( { 0.25, 0, 0, 0 } ),
+                                               Vector<double>( { -1, 1 } ),
+                                               Vector<double>( { 1, 1 } ) );
+
+        compareOutputLayerSymbolicBounds( nlr,
+                                          3,
+                                          Vector<double>( { 1 } ),
+                                          Vector<double>( { 1 } ),
+                                          Vector<double>( { 0 } ),
+                                          Vector<double>( { 0 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          2,
+                                          Vector<double>( { 1, -1 } ),
+                                          Vector<double>( { 1, -1 } ),
+                                          Vector<double>( { 0 } ),
+                                          Vector<double>( { 0 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          1,
+                                          Vector<double>( { 0.0833, 0 } ),
+                                          Vector<double>( { 0.25, 0 } ),
+                                          Vector<double>( { -2 } ),
+                                          Vector<double>( { 0 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          0,
+                                          Vector<double>( { 0.1667, 0.25 } ),
+                                          Vector<double>( { 0.5, 0.75 } ),
+                                          Vector<double>( { -3.25 } ),
+                                          Vector<double>( { -3.75 } ) );
+    }
+
+    void test_parameterised_symbolic_bound_maps_signs_active_and_externally_fixed()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        tableau.getBoundManager().initialize( 7 );
+        nlr.setTableau( &tableau );
+
+        // Create the layers
+        nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
+        nlr.addLayer( 2, NLR::Layer::SIGN, 2 );
+        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );
+
+        // Mark layer dependencies
+        for ( unsigned i = 1; i <= 3; ++i )
+            nlr.addLayerDependency( i - 1, i );
+
+        // Weights
+        nlr.setWeight( 0, 0, 1, 0, 2 );
+        nlr.setWeight( 0, 0, 1, 1, 1 );
+        nlr.setWeight( 0, 1, 1, 0, 3 );
+        nlr.setWeight( 0, 1, 1, 1, 1 );
+        nlr.setWeight( 2, 0, 3, 0, 1 );
+        nlr.setWeight( 2, 1, 3, 0, -1 );
+
+        // Mark the Sign sources
+        nlr.addActivationSource( 1, 0, 2, 0 );
+        nlr.addActivationSource( 1, 1, 2, 1 );
+
+        // Variable indexing
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
+
+        // Very loose bounds for neurons except inputs
+        double large = 1000000;
+
+        tableau.setLowerBound( 2, -large );
+        tableau.setUpperBound( 2, large );
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+
+        tableau.setLowerBound( 0, 4 );
+        tableau.setUpperBound( 0, 6 );
+        tableau.setLowerBound( 1, 1 );
+        tableau.setUpperBound( 1, 5 );
+
+        // Strong negative bias for x2, which is node (1,0). Should make the node unfixed.
+        nlr.setBias( 1, 0, -15 );
+
+        // However, the weighted sum variable has been eliminated
+        nlr.eliminateVariable( 2, -3 );
+
+        unsigned paramCount = nlr.getNumberOfParameters();
+        Vector<double> coeffs( paramCount, 0.5 );
+
+        // Invoke parameterised initializeSymbolicBoundsMaps
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) );
+
+        /*
+          Input ranges:
+
+          x0: [4, 6]
+          x1: [1, 5]
+
+          Layers 1, 2:
+
+          x2 = -3
+          x2 is eliminated, everything set to -3
+
+          x3 = x0 + x1
+          x3.lb = x0 + x1 : [5, 11]
+          x3.ub = x0 + x1 : [5, 11]
+
+          First sign is negative, bounds become constant -1
+          Second sign is positive, bounds become constant 1
+
+          -1 <= x4 <= -1
+          x4: all set to -1
+
+          1 <= x5 <= 1
+          x5: all set to 1
+
+          Layer 3:
+
+          x6 = x4 - x5
+          x6.lb = 1 ( -1 ) - 1 ( 1 ) = -2
+          x6.ub = 1 ( -1 ) - 1 ( 1 ) = -2
+        */
+
+        List<Tightening> expectedBounds( {
+            // x2 does not appear, because it has been eliminated
+
+            Tightening( 3, 5, Tightening::LB ),
+            Tightening( 3, 11, Tightening::UB ),
+
+            Tightening( 4, -1, Tightening::LB ),
+            Tightening( 4, -1, Tightening::UB ),
+            Tightening( 5, 1, Tightening::LB ),
+            Tightening( 5, 1, Tightening::UB ),
+
+            Tightening( 6, -2, Tightening::LB ),
+            Tightening( 6, -2, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+        /*
+          Symbolic bounds of every activation layer in terms of predecessor:
+
+          Layer 2 (SIGN):
+          -1 <= x4 <= -1
+          1 <= x5 <= 1
+
+          Symbolic bounds of output layer in terms of every layer (backsubstitution):
+
+          Layer 3:
+          x6 <= x6 <= x6
+
+          Layer 2:
+          Using x6 = x4 - x5:
+          x4 - x5 <= x6 <= x4 - x5
+
+          Layer 1:
+          Using -1 <= x4 <= -1, 1 <= x5 <= 1:
+          -2 <= x6 <= -2
+
+          Layer 0:
+          -2 <= x6 <= -2
+        */
+        comparePredecessorLayerSymbolicBounds( nlr,
+                                               2,
+                                               Vector<double>( { 0, 0, 0, 0 } ),
+                                               Vector<double>( { 0, 0, 0, 0 } ),
+                                               Vector<double>( { -1, 1 } ),
+                                               Vector<double>( { -1, 1 } ) );
+
+        compareOutputLayerSymbolicBounds( nlr,
+                                          3,
+                                          Vector<double>( { 1 } ),
+                                          Vector<double>( { 1 } ),
+                                          Vector<double>( { 0 } ),
+                                          Vector<double>( { 0 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          2,
+                                          Vector<double>( { 1, -1 } ),
+                                          Vector<double>( { 1, -1 } ),
+                                          Vector<double>( { 0 } ),
+                                          Vector<double>( { 0 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          1,
+                                          Vector<double>( { 0, 0 } ),
+                                          Vector<double>( { 0, 0 } ),
+                                          Vector<double>( { -2 } ),
+                                          Vector<double>( { -2 } ) );
+        compareOutputLayerSymbolicBounds( nlr,
+                                          0,
+                                          Vector<double>( { 0, 0 } ),
+                                          Vector<double>( { 0, 0 } ),
+                                          Vector<double>( { -2 } ),
+                                          Vector<double>( { -2 } ) );
+    }
+
+    void test_parameterised_symbolic_bound_maps_leaky_relu()
+    {
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTLeakyReLU( nlr, tableau ); // alpha = 0.2
+
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 1 );
+
+        unsigned paramCount = nlr.getNumberOfParameters();
+        Vector<double> coeffs( paramCount, 0.5 );
+
+        // Invoke parameterised initializeSymbolicBoundsMaps
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) );
+
+        /*
+          Input ranges:
+
+          x0: [-1, 1]
+          x1: [-1, 1]
+
+          Layers 1, 2:
+          x2 = x0 + x1
+          x2.lb = x0 + x1 : [-2, 2]
+          x2.ub = x0 + x1 : [-2, 2]
+
+          x3 = x0 - x1
+          x3.lb = x0 - x1 : [-2, 2]
+          x3.ub = x0 - x1 : [-2, 2]
+
+          Both LeakyReLUs are undecided, bounds are concretized. Using custom lower
+          coefficient with alpha = { 0.5 }.
+          Lower Coefficient: ( 1 - 0.2 ) * 0.5 + 0.2 = 0.6
+          Lower Bias: 0
+          Upper Coefficient: ( 2 - 0.2*-2 )/( 2--2 ) = 2.4/4 = 0.6
+          Upper Bias: ( 0.2 - 1 ) * 2 * -2 /( 2--2 ) = 0.8
+
+          0.6 x2 <= x4 <= 0.6 x2 + 0.8
+          x4.lb = 0.6 ( x0 + x1 ) = 0.6 x0 + 0.6x1
+          x4.ub = 0.6 ( x0 + x1 ) + 0.8 = 0.6 x0 + 0.6 x1 + 0.8
+          x4 range: [-1.2, 2]
+
+          0.6 x3 <= x5 <= 0.6 x3 + 0.8
+          x5.lb = 0.6 ( x0 - x1 ) = 0.6 x0 - 0.6 x1
+          x5.ub = 0.6 ( x0 - x1 ) + 0.8 = 0.6 x0 - 0.6 x1 + 0.8
+          x5 range: [-1.2, 2]
+
+          Layers 3, 4:
+
+          x6 = x4 + x5
+          x6.lb = 1 ( 0.6x0 + 0.6x1 ) + 1 ( 0.6x0 - 0.6x1 ) = 1.2 x0 : [-1.2, 1.2]
+          x6.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) + 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 1.2 x0 + 1.6 : [0.4, 2.8]
+          x6 range: [-1.2, 2.8]
+
+          x7 = x4 - x5
+          x7.lb = 1 ( 0.6x0 + 0.6x1 ) - 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 1.2 x1 - 0.8 : [-2, 0.4]
+          x7.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) - 1 ( 0.6x0 - 0.6x1 ) = 1.2 x1 + 0.8 : [-0.4, 2]
+          x7 range: [-2, 2]
+
+          Both LeakyReLUs are undecided, bounds are concretized. Using custom lower
+          coefficient with alpha = { 0.5 }.
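+
+          In plain Python (a sketch mirroring the arithmetic below; the helper
+          name is ours, not part of the Marabou API):
+
+          def leaky_relu_relaxation(lb, ub, slope, alpha):
+              # (lower coeff, upper coeff, upper bias); lower bias is 0
+              lower_coeff = (1 - slope) * alpha + slope
+              upper_coeff = (ub - slope * lb) / (ub - lb)
+              upper_bias = (slope - 1) * ub * lb / (ub - lb)
+              return lower_coeff, upper_coeff, upper_bias
+
+          leaky_relu_relaxation(-1.2, 2.8, 0.2, 0.5)  # (0.6, 0.76, 0.672)
+          leaky_relu_relaxation(-2, 2, 0.2, 0.5)      # (0.6, 0.6, 0.8)
+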
+          Lower Coefficient (first LeakyReLU): ( 1 - 0.2 ) * 0.5 + 0.2 = 0.6
+          Lower Bias (first LeakyReLU): 0
+          Upper Coefficient (first LeakyReLU): ( 2.8 - 0.2*-1.2 )/( 2.8--1.2 ) = 3.04/4 = 0.76
+          Upper Bias (first LeakyReLU): ( 0.2 - 1 ) * 2.8 * -1.2 / ( 2.8--1.2 ) = 0.672
+
+          Lower Coefficient (second LeakyReLU): ( 1 - 0.2 ) * 0.5 + 0.2 = 0.6
+          Lower Bias (second LeakyReLU): 0
+          Upper Coefficient (second LeakyReLU): ( 2 - 0.2*-2 )/( 2--2 ) = 2.4/4 = 0.6
+          Upper Bias (second LeakyReLU): ( 0.2 - 1 ) * 2 * -2 / ( 2--2 ) = 0.8
+
+          0.6 x6 <= x8 <= 0.76 x6 + 0.672
+          x8.lb = 0.6 ( 1.2x0 ) = 0.72 x0
+          x8.ub = 0.76 ( 1.2x0 + 1.6 ) + 0.672 = 0.912 x0 + 1.888
+          x8 range: [-0.72, 2.8]
+
+          0.6 x7 <= x9 <= 0.6 x7 + 0.8
+          x9.lb = 0.6 ( 1.2x1 - 0.8 ) = 0.72 x1 - 0.48
+          x9.ub = 0.6 ( 1.2x1 + 0.8 ) + 0.8 = 0.72 x1 + 1.28
+          x9 range: [-1.2, 2]
+
+          Layer 5:
+
+          x10 = x8 + x9 + 1
+          x10.lb = 0.6 x6 + 0.6 x7 + 1 >= 0.6 ( x4 + x5 ) + 0.6 ( x4 - x5 ) + 1 =
+          1.2 x4 + 1 >= 1.2 ( 0.6 x2 ) + 1 = 0.72 x2 + 1
+          = 0.72 x0 + 0.72 x1 + 1 : [-0.44, 2.44]
+          x10.ub = ( 0.76 x6 + 0.672 ) + ( 0.6 x7 + 0.8 ) + 1 = 0.76 x6 + 0.6 x7 + 2.472
+          <= 0.76 ( x4 + x5 ) + 0.6 ( x4 - x5 ) + 2.472 = 1.36 x4 + 0.16 x5 + 2.472
+          <= 1.36 ( 0.6 x2 + 0.8 ) + 0.16 ( 0.6 x3 + 0.8 ) + 2.472
+          = 0.816 x2 + 0.096 x3 + 3.688 = 0.912 x0 + 0.72 x1 + 3.688 : [2.056, 5.32]
+          x10 range: [-0.44, 5.32]
+
+          x11 = x9
+          x11.lb = 0.72 x1 - 0.48 : [-1.2, 0.24]
+          x11.ub = 0.72 x1 + 1.28 : [-0.56, 2]
+          x11 range: [-1.2, 2]
+
+        */
+
+        List<Tightening> expectedBounds(
+            { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+              Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+
+              Tightening( 4, -1.2, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
+              Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+
+              Tightening( 6, -1.2, Tightening::LB ), Tightening( 6, 2.8, Tightening::UB ),
+              Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
+
+              Tightening( 8, -0.72, Tightening::LB ), Tightening( 8, 2.8, Tightening::UB ),
+              Tightening( 9, -1.2, Tightening::LB ), Tightening( 9, 2, Tightening::UB ),
+
+              Tightening( 10, -0.44, Tightening::LB ), Tightening( 10, 5.32, Tightening::UB ),
+              Tightening( 11, -1.2, Tightening::LB ), Tightening( 11, 2, Tightening::UB )
+
+            } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+        /*
+          Symbolic bounds of every activation layer in terms of predecessor:
+
+          Layer 2 (LEAKY_RELU):
+          0.6 x2 <= x4 <= 0.6 x2 + 0.8
+          0.6 x3 <= x5 <= 0.6 x3 + 0.8
+
+          Layer 4 (LEAKY_RELU):
+          0.6 x6 <= x8 <= 0.76 x6 + 0.672
+          0.6 x7 <= x9 <= 0.6 x7 + 0.8
+
+          Symbolic bounds of output layer in terms of every layer (backsubstitution):
+
+          Layer 5:
+          x10 <= x10 <= x10
+          x11 <= x11 <= x11
+
+          Layer 4:
+          Using x10 = x8 + x9 + 1, x11 = x9:
+          x8 + x9 + 1 <= x10 <= x8 + x9 + 1
+          x9 <= x11 <= x9
+
+          Layer 3:
+          Using 0.6 x6 <= x8 <= 0.76 x6 + 0.672, 0.6 x7 <= x9 <= 0.6 x7 + 0.8:
+          0.6 x6 + 0.6 x7 + 1 <= x10 <= 0.76 x6 + 0.6 x7 + 2.472
+          0.6
x7 <= x11 <= 0.6 x7 + 0.8 + + Layer 2: + Using x6 = x4 + x5, x7 = x4 - x5: + 1.2 x4 + 1 <= x10 <= 1.36 x4 + 0.16 x5 + 2.472 + 0.6 x4 - 0.6 x5 <= x11 <= 0.6 x4 - 0.6 x5 + 0.8 + + Layer 1: + Using 0.6 x2 <= x4 <= 0.6 x2 + 0.8, 0.6 x3 <= x5 <= 0.6 x3 + 0.8: + 0.72 x2 + 1 <= x10 <= 0.816 x2 + 0.096 x3 + 3.688 + 0.36 x2 - 0.36 x3 - 0.48 <= x11 <= 0.36 x2 - 0.36 x3 + 1.28 + + Layer 0: + Using x2 = x0 + x1, x3 = x0 - x1: + 0.72 x0 + 0.72 x1 + 1 <= x10 <= 0.912 x0 + 0.72 x1 + 3.688 + 0.72 x1 - 0.48 <= x11 <= 0.72 x1 + 1.28 + */ + comparePredecessorLayerSymbolicBounds( nlr, + 2, + Vector( { 0.6, 0, 0, 0.6 } ), + Vector( { 0.6, 0, 0, 0.6 } ), + Vector( { 0, 0 } ), + Vector( { 0.8, 0.8 } ) ); + + comparePredecessorLayerSymbolicBounds( nlr, + 4, + Vector( { 0.6, 0, 0, 0.6 } ), + Vector( { 0.76, 0, 0, 0.6 } ), + Vector( { 0, 0 } ), + Vector( { 0.672, 0.8 } ) ); + + compareOutputLayerSymbolicBounds( nlr, + 5, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 4, + Vector( { 1, 0, 1, 1 } ), + Vector( { 1, 0, 1, 1 } ), + Vector( { 1, 0 } ), + Vector( { 1, 0 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 3, + Vector( { 0.6, 0, 0.6, 0.6 } ), + Vector( { 0.76, 0, 0.6, 0.6 } ), + Vector( { 1, 0 } ), + Vector( { 2.472, 0.8 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 2, + Vector( { 1.2, 0.6, 0, -0.6 } ), + Vector( { 1.36, 0.6, 0.16, -0.6 } ), + Vector( { 1, 0 } ), + Vector( { 2.472, 0.8 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 1, + Vector( { 0.72, 0.36, 0, -0.36 } ), + Vector( { 0.816, 0.36, 0.096, -0.36 } ), + Vector( { 1, -0.48 } ), + Vector( { 3.688, 1.28 } ) ); + compareOutputLayerSymbolicBounds( nlr, + 0, + Vector( { 0.72, 0, 0.72, 0.72 } ), + Vector( { 0.912, 0, 0.72, 0.72 } ), + Vector( { 1, -0.48 } ), + Vector( { 3.688, 1.28 } ) ); + } + + void test_parameterised_symbolic_bound_maps_sigmoids_and_round() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSigmoidsAndRound( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + unsigned paramCount = nlr.getNumberOfParameters(); + Vector coeffs( paramCount, 0.5 ); + + // Invoke parameterised initializeSymbolicBoundsMaps + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + // Layer 1 + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 0 ), -2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 0 ), 2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 1 ), -2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 1 ), 2, 0.00001 ) ); + + // Layer 2 + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 0 ), 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 0 ), 0.8807, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 1 ), 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 1 ), 0.8807, 0.0001 ) ); + + // Layer 3 + /* + Double-check with Python + --- + from math import exp as e + def g(x): + return 1 / (1 + e(-x)) + + def g_prime(x): + return g(x) * (1 - g(x)) + + def lam(l, u): + 
return (g(u) - g(l)) / (u - l) + + def lam_prime(l, u): + return min(g_prime(l), g_prime(u)) + + l3 = l4 = -2 + u3 = u4 = 2 + l5 = l6 = g(-2) + u5 = u6 = g(2) + lambda7 = lam(l3, u3) + lambda7_prime = lam_prime(l3, u3) + lambda8 = lam(l4, u4) + lambda8_prime = lam_prime(l4, u4) + x7_l = lambda7_prime * (-2) + g(-2) + g(-2) - lambda7_prime * (-2 + -2) + x7_u = lambda7_prime * (2) + g(2) + g(2) -lambda7_prime * (2 + 2) + x8_l = lambda8_prime * (-2) + g(-2) - g(2) - lambda8_prime * (-2 - 2) + x8_u = lambda8_prime * (2) + g(2) - g(-2) -lambda8_prime * (2 - -2) + print(x7_l) + print(x7_u) + print(x8_l) + print(x8_u) + + ''' + Sigmoid linear relaxation ( Layer 2 ): + x4 >= lambda7_prime * x2 + ( g(l3) - lambda7_prime * l3 ) + x4 <= lambda7_prime * x2 + ( g(u3) - lambda7_prime * u3 ) + x5 >= lambda8_prime * x3 + ( g(l4) - lambda8_prime * l4 ) + x5 <= lambda8_prime * x3 + ( g(u4) - lambda7_prime * u4 ) + ''' + print('------------------') + print(lambda7_prime) + print(lambda8_prime) + print(g(l3) - lambda7_prime * l3) + print(g(u3) - lambda7_prime * u3) + print(g(l4) - lambda8_prime * l4) + print(g(u4) - lambda8_prime * u4) + + + --- + [output]: + 0.4483930148512481 + 1.5516069851487517 + -0.5516069851487517 + 0.5516069851487517 + ------------------ + 0.1049935854035065 + 0.1049935854035065 + 0.3291900928291306 + 0.6708099071708693 + 0.3291900928291306 + 0.6708099071708693 + */ + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 0 ), 0.4483, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 0 ), 1.5516, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 1 ), -0.5516, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 1 ), 0.5516, 0.0001 ) ); + + // Layer 4 + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 0 ), 0 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 0 ), 2 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 1 ), -1 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 1 ), 1 ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (SIGMOID): + 0.1050 x2 + 0.3292 <= x4 <= 0.1050 x2 + 0.6708 + 0.1050 x3 + 0.3292 <= x5 <= 0.1050 x3 + 0.6708 + + Layer 4 (ROUND): + x6 - 0.5 <= x8 <= x6 + 0.5 + x7 - 0.5 <= x9 <= x7 + 0.5 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 4: + x8 <= x8 <= x8 + x9 <= x9 <= x9 + + Layer 3: + Using x6 - 0.5 <= x8 <= x6 + 0.5, x7 - 0.5 <= x9 <= x7 + 0.5: + x6 - 0.5 <= x8 <= x6 + 0.5 + x7 - 0.5 <= x9 <= x7 + 0.5 + + Layer 2: + Using x6 = x4 + x5, x7 = x4 - x5: + x4 + x5 - 0.5 <= x8 <= x4 + x5 + 0.5 + x4 - x5 - 0.5 <= x9 <= x4 - x5 + 0.5 + + Layer 1: + Using + 0.1050 x2 + 0.3292 <= x4 <= 0.1050 x2 + 0.6708, + 0.1050 x3 + 0.3292 <= x5 <= 0.1050 x3 + 0.6708: + 0.1050 x2 + 0.1050 x3 + 0.1584 <= x8 <= 0.1050 x2 + 0.1050 x3 + 1.8416 + 0.1050 x2 - 0.1050 x3 - 0.8416 <= x9 <= 0.1050 x2 - 0.1050 x3 + 0.8516 + + Layer 0: + Using x2 = x0 + x1, x3 = x0 - x1: + 0.2100 x0 + 0.1584 <= x8 <= 0.2100 x0 + 1.8416 + 0.2100 x1 - 0.8416 <= x9 <= 0.2100 x1 + 0.8516 + */ + comparePredecessorLayerSymbolicBounds( nlr, + 2, + Vector( { 0.1050, 0, 0, 0.1050 } ), + Vector( { 0.1050, 0, 0, 0.1050 } ), + Vector( { 0.3292, 0.3292 } ), + Vector( { 0.6708, 0.6708 } ) ); + comparePredecessorLayerSymbolicBounds( nlr, + 4, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { -0.5, -0.5 } ), + Vector( { 0.5, 0.5 } ) ); + + compareOutputLayerSymbolicBounds( nlr, + 4, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( 
+ compareOutputLayerSymbolicBounds( nlr,
+ 3,
+ Vector( { 1, 0, 0, 1 } ),
+ Vector( { 1, 0, 0, 1 } ),
+ Vector( { -0.5, -0.5 } ),
+ Vector( { 0.5, 0.5 } ) );
+ compareOutputLayerSymbolicBounds( nlr,
+ 2,
+ Vector( { 1, 1, 1, -1 } ),
+ Vector( { 1, 1, 1, -1 } ),
+ Vector( { -0.5, -0.5 } ),
+ Vector( { 0.5, 0.5 } ) );
+ compareOutputLayerSymbolicBounds( nlr,
+ 1,
+ Vector( { 0.1050, 0.1050, 0.1050, -0.1050 } ),
+ Vector( { 0.1050, 0.1050, 0.1050, -0.1050 } ),
+ Vector( { 0.1584, -0.8416 } ),
+ Vector( { 1.8416, 0.8416 } ) );
+ compareOutputLayerSymbolicBounds( nlr,
+ 0,
+ Vector( { 0.2100, 0, 0, 0.2100 } ),
+ Vector( { 0.2100, 0, 0, 0.2100 } ),
+ Vector( { 0.1584, -0.8416 } ),
+ Vector( { 1.8416, 0.8416 } ) );
+ }
+
+ void test_parameterised_symbolic_bound_maps_max_not_fixed()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkSBTMax( nlr, tableau );
+
+ tableau.setLowerBound( 0, -1 );
+ tableau.setUpperBound( 0, 1 );
+ tableau.setLowerBound( 1, -1 );
+ tableau.setUpperBound( 1, 2 );
+
+ unsigned paramCount = nlr.getNumberOfParameters();
+ Vector coeffs( paramCount, 0.5 );
+
+ // Invoke parameterised initializeSymbolicBoundsMaps
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) );
+
+ /*
+ Input ranges:
+
+ x0: [-1, 1]
+ x1: [-1, 2]
+
+ Layers 1, 2, 3:
+
+ x2 = x0 + x1
+ x2.lb = x0 + x1 : [-2, 3]
+ x2.ub = x0 + x1 : [-2, 3]
+
+ x3 = x0 - x1
+ x3.lb = x0 - x1 : [-3, 2]
+ x3.ub = x0 - x1 : [-3, 2]
+
+ Both ReLUs are undecided, bounds are concretized. Using custom ReLU lower
+ coefficient of 0.5.
+ Upper coefficient (first ReLU): 3/( 3 - (-2) ) = 3/5 = 0.6.
+ Upper coefficient (second ReLU): 2/( 2 - (-3) ) = 2/5 = 0.4.
+
+ 0.5 x2 <= x4 <= 0.6 x2 + 1.2
+ x4.lb = 0.5 ( x0 + x1 ) = 0.5 x0 + 0.5 x1
+ x4.ub = 0.6 ( x0 + x1 ) + 1.2 = 0.6x0 + 0.6x1 + 1.2
+ x4 range: [-1, 3]
+
+ 0.5 x3 <= x5 <= 0.4 x3 + 1.2
+ x5.lb = 0.5 ( x0 - x1 ) = 0.5 x0 - 0.5 x1
+ x5.ub = 0.4 ( x0 - x1 ) + 1.2 = 0.4x0 - 0.4x1 + 1.2
+ x5 range: [-1.5, 2]
+
+ Max is not fixed because x5.lb <= x4.ub and x4.lb <= x5.ub.
+ Max inherits its lower bound from x4, and its upper bound is constant 3.
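+
+ As a side note, the parameterised ReLU relaxation used above can be
+ reproduced with a short Python sketch (illustrative only: relu_relaxation
+ is a hypothetical helper, and the 0.5 lower slope stands for the
+ corresponding entry of the coeffs vector passed to
+ initializeSymbolicBoundsMaps):
+
+ def relu_relaxation(lb, ub, alpha=0.5):
+     # Undecided ReLU (lb < 0 < ub): the lower bound is the line alpha * x,
+     # the upper bound is the secant ub / (ub - lb) * (x - lb).
+     upper_coeff = ub / (ub - lb)
+     return alpha, 0.0, upper_coeff, -upper_coeff * lb
+
+ print(relu_relaxation(-2, 3))  # (0.5, 0.0, 0.6, 1.2), matching x4
+ print(relu_relaxation(-3, 2))  # (0.5, 0.0, 0.4, 1.2), matching x5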
+
+ x4 <= x6 <= 3
+ x6.lb = 0.5 x0 + 0.5 x1 : [-1, 1.5]
+ x6.ub = 3 : [3, 3]
+ x6 range: [-1, 3]
+
+ Layer 4:
+
+ x7 = 2x6
+ => 2x4 <= x7 <= 6
+ x7.lb = 2 ( 0.5 x0 + 0.5 x1 ) = x0 + x1 : [-2, 3]
+ x7.ub = 2 ( 3 ) = 6 : [6, 6]
+ x7 range: [-2, 6]
+ */
+
+ List expectedBounds( {
+ Tightening( 2, -2, Tightening::LB ),
+ Tightening( 2, 3, Tightening::UB ),
+ Tightening( 3, -3, Tightening::LB ),
+ Tightening( 3, 2, Tightening::UB ),
+ Tightening( 4, -1, Tightening::LB ),
+ Tightening( 4, 3, Tightening::UB ),
+ Tightening( 5, -1.5, Tightening::LB ),
+ Tightening( 5, 2, Tightening::UB ),
+ Tightening( 6, -1, Tightening::LB ),
+ Tightening( 6, 3, Tightening::UB ),
+ Tightening( 7, -2, Tightening::LB ),
+ Tightening( 7, 6, Tightening::UB ),
+
+ } );
+
+ List bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+ /*
+ Symbolic bounds of every activation layer in terms of predecessor:
+
+ Layer 2 (RELU):
+ 0.5 x2 <= x4 <= 0.6 x2 + 1.2
+ 0.5 x3 <= x5 <= 0.4 x3 + 1.2
+
+ Layer 3 (MAX):
+ x4 <= x6 <= 3
+
+ Symbolic bounds of output layer in terms of every layer (backsubstitution):
+
+ Layer 4:
+ x7 <= x7 <= x7
+
+ Layer 3:
+ Using x7 = 2x6:
+ 2x6 <= x7 <= 2x6
+
+ Layer 2:
+ Using x4 <= x6 <= 3:
+ 2x4 <= x7 <= 6
+
+ Layer 1:
+ Using 0.5 x2 <= x4 <= 0.6 x2 + 1.2:
+ x2 <= x7 <= 6
+
+ Layer 0:
+ Using x2 = x0 + x1:
+ x0 + x1 <= x7 <= 6
+ */
+ comparePredecessorLayerSymbolicBounds( nlr,
+ 2,
+ Vector( { 0.5, 0, 0, 0.5 } ),
+ Vector( { 0.6, 0, 0, 0.4 } ),
+ Vector( { 0, 0 } ),
+ Vector( { 1.2, 1.2 } ) );
+ comparePredecessorLayerSymbolicBounds( nlr,
+ 3,
+ Vector( { 1, 0 } ),
+ Vector( { 0, 0 } ),
+ Vector( { 0 } ),
+ Vector( { 3 } ) );
+
+ compareOutputLayerSymbolicBounds( nlr,
+ 4,
+ Vector( { 1 } ),
+ Vector( { 1 } ),
+ Vector( { 0 } ),
+ Vector( { 0 } ) );
+ compareOutputLayerSymbolicBounds( nlr,
+ 3,
+ Vector( { 2 } ),
+ Vector( { 2 } ),
+ Vector( { 0 } ),
+ Vector( { 0 } ) );
+ compareOutputLayerSymbolicBounds( nlr,
+ 2,
+ Vector( { 2, 0 } ),
+ Vector( { 0, 0 } ),
+ Vector( { 0 } ),
+ Vector( { 6 } ) );
+ compareOutputLayerSymbolicBounds( nlr,
+ 1,
+ Vector( { 1, 0 } ),
+ Vector( { 0, 0 } ),
+ Vector( { 0 } ),
+ Vector( { 6 } ) );
+ compareOutputLayerSymbolicBounds( nlr,
+ 0,
+ Vector( { 1, 1 } ),
+ Vector( { 0, 0 } ),
+ Vector( { 0 } ),
+ Vector( { 6 } ) );
+ }
+
+ void test_parameterised_symbolic_bound_maps_max_fixed()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkSBTMax( nlr, tableau );
+
+ tableau.setLowerBound( 0, 1 );
+ tableau.setUpperBound( 0, 2 );
+ tableau.setLowerBound( 1, -3 );
+ tableau.setUpperBound( 1, -2 );
+
+ unsigned paramCount = nlr.getNumberOfParameters();
+ Vector coeffs( paramCount, 0.5 );
+
+ // Invoke parameterised initializeSymbolicBoundsMaps
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) );
+
+ /*
+ Input ranges:
+
+ x0: [1, 2]
+ x1: [-3, -2]
+
+ Layer 1:
+
+ x2 = x0 + x1
+ x2.lb = x0 + x1 : [-2, 0]
+ x2.ub = x0 + x1 : [-2, 0]
+
+ x3 = x0 - x1
+ x3.lb = x0 - x1 : [3, 5]
+ x3.ub = x0 - x1 : [3, 5]
+
+ First ReLU is negative, bounds become constant 0.
+ Second ReLU is positive, bounds survive the activation.
+
+ 0 <= x4 <= 0
+ x4: all set to 0
+
+ x3 <= x5 <= x3
+ x5.lb = x0 - x1 : [3, 5]
+ x5.ub = x0 - x1 : [3, 5]
+
+ Max is fixed because x5.lb > x4.ub, so it inherits x5's bounds.
+
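+ A quick Python sketch of this fixing test (illustrative only:
+ max_is_fixed is a hypothetical helper, using the strict comparison
+ described above):
+
+ def max_is_fixed(sources):
+     # sources: list of (lb, ub) pairs for the max inputs. The max is
+     # fixed to the input with the largest lower bound when that lower
+     # bound beats every other input's upper bound.
+     best = max(range(len(sources)), key=lambda i: sources[i][0])
+     return all(sources[best][0] > ub
+                for j, (_, ub) in enumerate(sources) if j != best)
+
+ print(max_is_fixed([(0, 0), (3, 5)]))      # True: fixed to x5 here
+ print(max_is_fixed([(-1, 3), (-1.5, 2)]))  # False: the previous test's case
+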
x5 <= x6 <= x5
+ => x3 <= x6 <= x3
+ x6.lb = x0 - x1 : [3, 5]
+ x6.ub = x0 - x1 : [3, 5]
+
+ Layer 4:
+
+ x7 = 2x6
+ => x7 = 2x5 = 2x3 = 2x0 - 2x1
+ x7.lb = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10]
+ x7.ub = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10]
+ */
+
+ List expectedBounds( {
+ Tightening( 2, -2, Tightening::LB ),
+ Tightening( 2, 0, Tightening::UB ),
+ Tightening( 3, 3, Tightening::LB ),
+ Tightening( 3, 5, Tightening::UB ),
+ Tightening( 4, 0, Tightening::LB ),
+ Tightening( 4, 0, Tightening::UB ),
+ Tightening( 5, 3, Tightening::LB ),
+ Tightening( 5, 5, Tightening::UB ),
+ Tightening( 6, 3, Tightening::LB ),
+ Tightening( 6, 5, Tightening::UB ),
+ Tightening( 7, 6, Tightening::LB ),
+ Tightening( 7, 10, Tightening::UB ),
+
+ } );
+
+ List bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+ /*
+ Symbolic bounds of every activation layer in terms of predecessor:
+
+ Layer 2 (RELU):
+ 0 <= x4 <= 0
+ x3 <= x5 <= x3
+
+ Layer 3 (MAX):
+ x5 <= x6 <= x5
+
+ Symbolic bounds of output layer in terms of every layer (backsubstitution):
+
+ Layer 4:
+ x7 <= x7 <= x7
+
+ Layer 3:
+ Using x7 = 2x6:
+ 2x6 <= x7 <= 2x6
+
+ Layer 2:
+ Using x5 <= x6 <= x5:
+ 2x5 <= x7 <= 2x5
+
+ Layer 1:
+ Using x3 <= x5 <= x3:
+ 2x3 <= x7 <= 2x3
+
+ Layer 0:
+ Using x3 = x0 - x1:
+ 2x0 - 2x1 <= x7 <= 2x0 - 2x1
+ */
+ comparePredecessorLayerSymbolicBounds( nlr,
+ 2,
+ Vector( { 0, 0, 0, 1 } ),
+ Vector( { 0, 0, 0, 1 } ),
+ Vector( { 0, 0 } ),
+ Vector( { 0, 0 } ) );
+ comparePredecessorLayerSymbolicBounds( nlr,
+ 3,
+ Vector( { 0, 1 } ),
+ Vector( { 0, 1 } ),
+ Vector( { 0 } ),
+ Vector( { 0 } ) );
+
+ compareOutputLayerSymbolicBounds( nlr,
+ 4,
+ Vector( { 1 } ),
+ Vector( { 1 } ),
+ Vector( { 0 } ),
+ Vector( { 0 } ) );
+ compareOutputLayerSymbolicBounds( nlr,
+ 3,
+ Vector( { 2 } ),
+ Vector( { 2 } ),
+ Vector( { 0 } ),
+ Vector( { 0 } ) );
+ compareOutputLayerSymbolicBounds( nlr,
+ 2,
+ Vector( { 0, 2 } ),
+ Vector( { 0, 2 } ),
+ Vector( { 0 } ),
+ Vector( { 0 } ) );
+ compareOutputLayerSymbolicBounds( nlr,
+ 1,
+ Vector( { 0, 2 } ),
+ Vector( { 0, 2 } ),
+ Vector( { 0 } ),
+ Vector( { 0 } ) );
+ compareOutputLayerSymbolicBounds( nlr,
+ 0,
+ Vector( { 2, -2 } ),
+ Vector( { 2, -2 } ),
+ Vector( { 0 } ),
+ Vector( { 0 } ) );
+ }
+
+ void test_parameterised_symbolic_bound_maps_softmax1()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkSBTSoftmax( nlr, tableau );
+
+ tableau.setLowerBound( 0, -1 );
+ tableau.setUpperBound( 0, 1 );
+ tableau.setLowerBound( 1, -1 );
+ tableau.setUpperBound( 1, 1 );
+ tableau.setLowerBound( 2, -1 );
+ tableau.setUpperBound( 2, 1 );
+
+ unsigned paramCount = nlr.getNumberOfParameters();
+ Vector coeffs( paramCount, 0.5 );
+
+ // Invoke parameterised initializeSymbolicBoundsMaps
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) );
+ }
+
+ void test_parameterised_symbolic_bound_maps_softmax2()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ {
+ Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" );
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkSBTSoftmax( nlr, tableau );
+
+ tableau.setLowerBound( 0, 1 );
+ tableau.setUpperBound( 0, 1.000001 );
+ tableau.setLowerBound( 1, 1 );
+ tableau.setUpperBound( 1, 1.000001 );
+ tableau.setLowerBound( 2, 1 );
+ tableau.setUpperBound( 2, 1.000001 );
+
+ unsigned paramCount = nlr.getNumberOfParameters();
+ Vector coeffs( paramCount, 0.5 );
+
+ // Invoke parameterised initializeSymbolicBoundsMaps
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) );
+
+ /*
+ Input ranges:
+
+ x0: [1, 1.000001]
+ x1: [1, 1.000001]
+ x2: [1, 1.000001]
+
+ Layer 1:
+
+ x3 = x0 - x1 + x2 + 1
+ x3.lb = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ]
+ x3.ub = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ]
+ x3 range: [ 1.999999, 2.000002 ]
+
+ x4 = -x0 + x1 + x2 + 2
+ x4.lb = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ]
+ x4.ub = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ]
+ x4 range: [ 2.999999, 3.000002 ]
+
+ x5 = -x0 - x1 - x2 + 3
+ x5.lb = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ]
+ x5.ub = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ]
+ x5 range: [ -0.000003, 0 ]
+ */
+
+ unsigned size = nlr.getLayer( 2 )->getSize();
+ Vector sourceLbs = { 1.999899, 2.999899, -0.000003 };
+ Vector sourceUbs = { 2.000102, 3.000102, 0.0001 };
+ Vector sourceMids = { 2.0000005, 3.0000005, -0.0000015 };
+ Vector targetLbs( size, 0 );
+ Vector targetUbs( size, 0 );
+ Vector symbolicLb( size * size, 0 );
+ Vector symbolicUb( size * size, 0 );
+ Vector symbolicLowerBias( size, 0 );
+ Vector symbolicUpperBias( size, 0 );
+ for ( unsigned i = 0; i < size; ++i )
+ {
+ targetLbs[i] = NLR::Layer::linearLowerBound( sourceLbs, sourceUbs, i );
+ targetUbs[i] = NLR::Layer::linearUpperBound( sourceLbs, sourceUbs, i );
+ }
+ for ( unsigned i = 0; i < size; ++i )
+ {
+ symbolicLowerBias[i] =
+ NLR::Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, i ); // Using lse2
+ symbolicUpperBias[i] =
+ NLR::Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, i );
+ for ( unsigned j = 0; j < size; ++j )
+ {
+ symbolicLb[size * j + i] =
+ NLR::Layer::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, i, j );
+ symbolicUb[size * j + i] =
+ NLR::Layer::dLSEUpperbound( sourceMids, targetLbs, targetUbs, i, j );
+ symbolicLowerBias[i] -= symbolicLb[size * j + i] * sourceMids[j];
+ symbolicUpperBias[i] -= symbolicUb[size * j + i] * sourceMids[j];
+ }
+ }
+ TS_ASSERT( compareVectors( targetLbs, Vector( { 0.2595, 0.7054, 0.0351 } ) ) );
+ TS_ASSERT( compareVectors( targetUbs, Vector( { 0.2595, 0.7054, 0.0351 } ) ) );
+ TS_ASSERT( compareVectors( symbolicLb,
+ Vector( { 0.1922,
+ -0.1830,
+ -0.0091,
+ -0.1830,
+ 0.2078,
+ -0.0248,
+ -0.0091,
+ -0.0248,
+ 0.0339 } ) ) );
+ TS_ASSERT( compareVectors( symbolicUb,
+ Vector( { 0.1922,
+ -0.1830,
+ -0.0091,
+ -0.1830,
+ 0.2078,
+ -0.0248,
+ -0.0091,
+ -0.0248,
+ 0.0339 } ) ) );
+ TS_ASSERT(
+ compareVectors( symbolicLowerBias, Vector( { 0.4243, 0.4481, 0.1277 } ) ) );
+ TS_ASSERT(
+ compareVectors( symbolicUpperBias, Vector( { 0.4243, 0.4480, 0.1277 } ) ) );
+
+ /*
+ Layer 2:
+
+0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243
+ x6.lb = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232
+ x6.ub = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232
+ x6 range: [ 0.2595, 0.2595 ]
+
+-0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4480 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481
+ x7.lb = -0.3660 x0 + 0.4156 x1 + 0.0496 x2 + 0.6062
+ x7.ub = -0.3660 x0 + 0.4156 x1 + 0.0496 x2 + 0.6063
+ x7 range: [ 0.7054, 0.7054 ]
+
+-0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277
+ x8.lb = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707
+ x8.ub = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707
+ x8 range: [ 0.0351, 0.0351 ]
+
+ Layer 3:
+
+ x9 = x6 + x7 + x8
+ => x9 = ( 0.1922 - 0.1830 - 0.0091 ) x3 + ( -0.1830 + 0.2078 - 0.0248 ) x4 + (
+ -0.0091 - 0.0248 + 0.0339 ) x5 + ( 0.4243 + 0.4481 + 0.1277 )
+
+ => x9 = 0.0001 x3 + 0 x4 + 0 x5 + 1.0001
+ => ( Up to rounding ) 1 <= x9 <= 1.
+ x9.lb = 1
+ x9.ub = 1
+ x9 range: [ 1, 1 ]
+
+ x10 = - x6 - x7 - x8
+ => x10 = - ( 0.1922 - 0.1830 - 0.0091 ) x3 - ( -0.1830 + 0.2078 - 0.0248 ) x4 - (
+ -0.0091 - 0.0248 + 0.0339 ) x5 - ( 0.4243 + 0.4481 + 0.1277 )
+
+ => x10 = - 0.0001 x3 - 0.0000 x4 - 0.0000 x5 - 1.0001
+ => ( Up to rounding ) -1 <= x10 <= -1.
+ x10.lb = -1
+ x10.ub = -1
+ x10 range: [ -1, -1 ]
+ */
+
+ List expectedBounds( { Tightening( 3, 2, Tightening::LB ),
+ Tightening( 3, 2, Tightening::UB ),
+ Tightening( 4, 3, Tightening::LB ),
+ Tightening( 4, 3, Tightening::UB ),
+ Tightening( 5, 0, Tightening::LB ),
+ Tightening( 5, 0, Tightening::UB ),
+ Tightening( 6, 0.2595, Tightening::LB ),
+ Tightening( 6, 0.2595, Tightening::UB ),
+ Tightening( 7, 0.7054, Tightening::LB ),
+ Tightening( 7, 0.7054, Tightening::UB ),
+ Tightening( 8, 0.0351, Tightening::LB ),
+ Tightening( 8, 0.0351, Tightening::UB ),
+ Tightening( 9, 1, Tightening::LB ),
+ Tightening( 9, 1, Tightening::UB ),
+ Tightening( 10, -1, Tightening::LB ),
+ Tightening( 10, -1, Tightening::UB )
+
+ } );
+
+ List bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+ /*
+ Symbolic bounds of every activation layer in terms of predecessor:
+
+ Layer 2 (SOFTMAX):
+0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243
+-0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481
+-0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277
+
+ Symbolic bounds of output layer in terms of every layer (backsubstitution):
+
+ Layer 3:
+ x9 <= x9 <= x9
+ x10 <= x10 <= x10
+
+ Layer 2:
+ Using x9 = x6 + x7 + x8, x10 = -x6 - x7 - x8:
+ x6 + x7 + x8 <= x9 <= x6 + x7 + x8
+ -x6 - x7 - x8 <= x10 <= -x6 - x7 - x8
+
+ Layer 1:
+ Using
+0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243,
+-0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481,
+-0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277:
+ 1 <= x9 <= 1
+ -1 <= x10 <= -1
+
+ Layer 0:
+ 1 <= x9 <= 1
+ -1 <= x10 <= -1
+ */
+ comparePredecessorLayerSymbolicBounds( nlr,
+ 2,
+ Vector( { 0.1922,
+ -0.1830,
+ -0.0091,
+ -0.1830,
+ 0.2078,
+ -0.0248,
+ -0.0091,
+ -0.0248,
+ 0.0339 } ),
+ Vector( { 0.1922,
+ -0.1830,
+ -0.0091,
+ -0.1830,
+ 0.2078,
+ -0.0248,
+ -0.0091,
+ -0.0248,
+ 0.0339 } ),
+ Vector( { 0.4243, 0.4481, 0.1277 } ),
+ Vector( { 0.4243, 0.4480, 0.1277 } ) );
+
+ compareOutputLayerSymbolicBounds( nlr,
+ 3,
+ Vector( { 1, 0, 0, 1 } ),
+ Vector( { 1, 0, 0, 1 } ),
+ Vector( { 0, 0 } ),
+ Vector( { 0, 0 } ) );
+ compareOutputLayerSymbolicBounds( nlr,
+ 2,
+ Vector( { 1, -1, 1, -1, 1, -1 } ),
+ Vector( { 1, -1, 1, -1, 1, -1 } ),
+ Vector( { 0, 0 } ),
+ Vector( { 0, 0 } ) );
+ compareOutputLayerSymbolicBounds( nlr,
+ 1,
+ Vector( { 0, 0, 0, 0, 0, 0 } ),
+ Vector( { 0, 0, 0, 0, 0, 0 } ),
+ Vector( { 1, -1 } ),
+ Vector( { 1, -1 } ) );
+ compareOutputLayerSymbolicBounds( nlr,
+ 0,
+ Vector( { 0, 0, 0, 0, 0, 0 } ),
+ Vector( { 0, 0, 0, 0, 0, 0 } ),
+ Vector( { 1, -1 } ),
+ Vector( { 1, -1 } ) );
+ }
+ {
+ Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "er" );
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkSBTSoftmax( nlr, tableau );
+
+ tableau.setLowerBound( 0, 1 );
+ tableau.setUpperBound( 0, 1.000001 );
+ tableau.setLowerBound( 1, 1 );
+ tableau.setUpperBound( 1, 1.000001 );
+ tableau.setLowerBound( 2, 1 );
+ tableau.setUpperBound( 2, 1.000001 );
+
+ unsigned paramCount = nlr.getNumberOfParameters();
+ Vector coeffs( paramCount, 0.5 );
+
+ // Invoke parameterised initializeSymbolicBoundsMaps
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) );
+
+ /*
+ Input ranges:
+
+ x0: [1, 1.000001]
+ x1: [1, 1.000001]
+ x2: [1, 1.000001]
+
+ Layer 1:
+
+ x3 = x0 - x1 + x2 + 1
+ x3.lb = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ]
+ x3.ub = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ]
+ x3 range: [ 1.999999, 2.000002 ]
+
+ x4 = -x0 + x1 + x2 + 2
+ x4.lb = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ]
+ x4.ub = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ]
+ x4 range: [ 2.999999, 3.000002 ]
+
+ x5 = -x0 - x1 - x2 + 3
+ x5.lb = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ]
+ x5.ub = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ]
+ x5 range: [ -0.000003, 0 ]
+ */
+
+ unsigned size = nlr.getLayer( 2 )->getSize();
+ Vector sourceLbs = { 1.999899, 2.999899, -0.000003 };
+ Vector sourceUbs = { 2.000102, 3.000102, 0.0001 };
+ Vector sourceMids = { 2.0000005, 3.0000005, -0.0000015 };
+ Vector targetLbs( size, 0 );
+ Vector targetUbs( size, 0 );
+ Vector symbolicLb( size * size, 0 );
+ Vector symbolicUb( size * size, 0 );
+ Vector symbolicLowerBias( size, 0 );
+ Vector symbolicUpperBias( size, 0 );
+ for ( unsigned i = 0; i < size; ++i )
+ {
+ targetLbs[i] = NLR::Layer::linearLowerBound( sourceLbs, sourceUbs, i );
+ targetUbs[i] = NLR::Layer::linearUpperBound( sourceLbs, sourceUbs, i );
+ }
+ for ( unsigned i = 0; i < size; ++i )
+ {
+ symbolicLowerBias[i] =
+ NLR::Layer::ERLowerBound( sourceMids, sourceLbs, sourceUbs, i ); // Using er
+ symbolicUpperBias[i] =
+ NLR::Layer::ERUpperBound( sourceMids, targetLbs, targetUbs, i );
+ for ( unsigned j = 0; j < size; ++j )
+ {
+ symbolicLb[size * j + i] =
+ NLR::Layer::dERLowerBound( sourceMids, sourceLbs, sourceUbs, i, j );
+ symbolicUb[size * j + i] =
+ NLR::Layer::dERUpperBound( sourceMids, targetLbs, targetUbs, i, j );
+ symbolicLowerBias[i] -= symbolicLb[size * j + i] * sourceMids[j];
+ symbolicUpperBias[i] -= symbolicUb[size * j + i] * sourceMids[j];
+ }
+ }
+ TS_ASSERT( compareVectors( targetLbs, Vector( { 0.2595, 0.7054, 0.0351 } ) ) );
+ TS_ASSERT( compareVectors( targetUbs, Vector( { 0.2595, 0.7054, 0.0351 } ) ) );
+ TS_ASSERT( compareVectors( symbolicLb,
+ Vector( { 0.1922,
+ -0.1830,
+ -0.0091,
+ -0.1830,
+ 0.2078,
+ -0.0248,
+ -0.0091,
+ -0.0248,
+ 0.0339 } ) ) );
+ TS_ASSERT( compareVectors( symbolicUb,
+ Vector( { 0.1922,
+ -0.1830,
+ -0.0091,
+ -0.1830,
+ 0.2078,
+ -0.0248,
+ -0.0091,
+ -0.0248,
+ 0.0339 } ) ) );
+ TS_ASSERT(
+ compareVectors( symbolicLowerBias, Vector( { 0.4243, 0.4481, 0.1277 } ) ) );
+ TS_ASSERT(
+ compareVectors( symbolicUpperBias, Vector( { 0.4243, 0.4480, 0.1277 } ) ) );
+
+ /*
+ Layer 2:
+
+0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243
+ x6.lb = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232
+ x6.ub = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232
+ x6 range: [ 0.2595, 0.2595 ]
+
+-0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4480 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481
+ x7.lb = -0.3660 x0 + 0.4156 x1 + 0.0496 x2 + 0.6062
+ x7.ub = -0.3660 x0 + 0.4156 x1 + 0.0496 x2 + 0.6063
+ x7 range: [ 0.7054, 0.7054 ]
+
+-0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277
+ x8.lb = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707
+ x8.ub = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707
+ x8 range: [ 0.0351, 0.0351 ]
+
+ Layer 3:
+
+ x9 = x6 + x7 + x8
+ => x9 = ( 0.1922 - 0.1830 - 0.0091 ) x3 + ( -0.1830 + 0.2078 - 0.0248 ) x4 + (
+ -0.0091 - 0.0248 + 0.0339 ) x5 + ( 0.4243 + 0.4481 + 0.1277 )
+
+ => x9 = 0.0001 x3 + 0 x4 + 0 x5 + 1.0001
+ => ( Up to rounding ) 1 <= x9 <= 1.
+ x9.lb = 1
+ x9.ub = 1
+ x9 range: [ 1, 1 ]
+
+ x10 = - x6 - x7 - x8
+ => x10 = - ( 0.1922 - 0.1830 - 0.0091 ) x3 - ( -0.1830 + 0.2078 - 0.0248 ) x4 - (
+ -0.0091 - 0.0248 + 0.0339 ) x5 - ( 0.4243 + 0.4481 + 0.1277 )
+
+ => x10 = - 0.0001 x3 - 0.0000 x4 - 0.0000 x5 - 1.0001
+ => ( Up to rounding ) -1 <= x10 <= -1.
+ x10.lb = -1
+ x10.ub = -1
+ x10 range: [ -1, -1 ]
+ */
+ List expectedBounds( { Tightening( 3, 2, Tightening::LB ),
+ Tightening( 3, 2, Tightening::UB ),
+ Tightening( 4, 3, Tightening::LB ),
+ Tightening( 4, 3, Tightening::UB ),
+ Tightening( 5, 0, Tightening::LB ),
+ Tightening( 5, 0, Tightening::UB ),
+ Tightening( 6, 0.2595, Tightening::LB ),
+ Tightening( 6, 0.2595, Tightening::UB ),
+ Tightening( 7, 0.7054, Tightening::LB ),
+ Tightening( 7, 0.7054, Tightening::UB ),
+ Tightening( 8, 0.0351, Tightening::LB ),
+ Tightening( 8, 0.0351, Tightening::UB ),
+ Tightening( 9, 1, Tightening::LB ),
+ Tightening( 9, 1, Tightening::UB ),
+ Tightening( 10, -1, Tightening::LB ),
+ Tightening( 10, -1, Tightening::UB )
+
+ } );
+
+ List bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+ /*
+ Symbolic bounds of every activation layer in terms of predecessor:
+
+ Layer 2 (SOFTMAX):
+0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243
+-0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481
+-0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277
+
+ Symbolic bounds of output layer in terms of every layer (backsubstitution):
+
+ Layer 3:
+ x9 <= x9 <= x9
+ x10 <= x10 <= x10
+
+ Layer 2:
+ Using x9 = x6 + x7 + x8, x10 = -x6 - x7 - x8:
+ x6 + x7 + x8 <= x9 <= x6 + x7 + x8
+ -x6 - x7 - x8 <= x10 <= -x6 - x7 - x8
+
+ Layer 1:
+ Using
+0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243,
+-0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481,
+-0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277:
+ 1 <= x9 <= 1
+ -1 <= x10 <= -1
+
+ Layer 0:
+ 1 <= x9 <= 1
+ -1 <= x10 <= -1
+ */
+ comparePredecessorLayerSymbolicBounds( nlr,
+ 2,
+ Vector( { 0.1922,
+ -0.1830,
+ -0.0091,
+ -0.1830,
+ 0.2078,
+ -0.0248,
+ -0.0091,
+ -0.0248,
+ 0.0339 } ),
+ Vector( { 0.1922,
+ -0.1830,
+ -0.0091,
+ -0.1830,
+ 0.2078,
+ -0.0248,
+ -0.0091,
+ -0.0248,
+ 0.0339 } ),
+ Vector( { 0.4243, 0.4481, 0.1277 } ),
+ Vector( { 0.4243, 0.4480, 0.1277 } ) );
+
+ compareOutputLayerSymbolicBounds( nlr,
+ 3,
+ Vector( { 1, 0, 0, 1 } ),
+ Vector( { 1, 0, 0, 1 } ),
+ Vector( { 0, 0 } ),
+ Vector( { 0, 0 } ) );
+ compareOutputLayerSymbolicBounds( nlr,
+ 2,
+ Vector( { 1, -1, 1, -1, 1, -1 } ),
+ Vector( { 1, -1, 1, -1, 1, -1 } ),
+ Vector( { 0, 0 } ),
+ Vector( { 0, 0 } ) );
+ compareOutputLayerSymbolicBounds( nlr,
+ 1,
+ Vector( { 0, 0, 0, 0, 0, 0 } ),
+ Vector( { 0, 0, 0, 0, 0, 0 } ),
+ Vector( { 1, -1 } ),
+ Vector( { 1, -1 } ) );
+ compareOutputLayerSymbolicBounds( nlr,
+ 0,
+ Vector( { 0, 0, 0, 0, 0, 0 } ),
+ Vector( { 0, 0, 0, 0, 0, 0 } ),
+ Vector( { 1, -1 } ),
+ Vector( { 1, -1 } ) );
+ }
+ }
+
+ void test_parameterised_symbolic_bound_maps_softmax3()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+ Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkSBTSoftmax2( nlr, tableau );
+
+ tableau.setLowerBound( 0, 1 );
+ tableau.setUpperBound( 0, 1.000001 );
+ tableau.setLowerBound( 1, 1 );
+ tableau.setUpperBound( 1, 1.000001 );
+ tableau.setLowerBound( 2, 1 );
+ tableau.setUpperBound( 2, 1.000001 );
+
+ unsigned paramCount = nlr.getNumberOfParameters();
+ Vector coeffs( paramCount, 0.5 );
+
+ // Invoke parameterised initializeSymbolicBoundsMaps
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) );
+
+ /*
+ Input ranges:
+
+ x0: [1, 1.000001]
+ x1: [1, 1.000001]
+ x2: [1, 1.000001]
+
+ Layer 1:
+
+ x3 = x0 - x1 + x2 + 1
+ x3.lb = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ]
+ x3.ub = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ]
+ x3 range: [ 1.999999, 2.000002 ]
+
+ x4 = -x0 + x1 + x2 + 2
+ x4.lb = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ]
+ x4.ub = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ]
+ x4 range: [ 2.999999, 3.000002 ]
+
+ x5 = -x0 - x1 - x2 + 3
+ x5.lb = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ]
+ x5.ub = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ]
+ x5 range: [ -0.000003, 0 ]
+
+ x6 = -x0 - x1 - x2 + 2
+ x6.lb = -x0 - x1 - x2 + 2 : [ -1.000003, -1 ]
+ x6.ub = -x0 - x1 - x2 + 2 : [ -1.000003, -1 ]
+ x6 range: [ -1.000003, -1 ]
+
+ x7 = -x0 - x1 - x2 + 1
+ x7.lb = -x0 - x1 - x2 + 1 : [ -2.000003, -2 ]
+ x7.ub = -x0 - x1 - x2 + 1 : [ -2.000003, -2 ]
+ x7 range: [ -2.000003, -2 ]
+ */
+
+ // First softmax group: x8 x10 x12 = softmax( x3, x5, x7 ).
+ unsigned size = nlr.getLayer( 2 )->getActivationSources( 0 ).size();
+ Vector sourceLbs = { 1.999899, -0.000003, -2.000103 };
+ Vector sourceUbs = { 2.000102, 0.0001, -1.999 };
+ Vector sourceMids = { 2.0000005, -0.0000015, -2.0000015 };
+ Vector targetLbs( size, 0 );
+ Vector targetUbs( size, 0 );
+ Vector symbolicLb( size * size, 0 );
+ Vector symbolicUb( size * size, 0 );
+ Vector symbolicLowerBias( size, 0 );
+ Vector symbolicUpperBias( size, 0 );
+ for ( unsigned i = 0; i < size; ++i )
+ {
+ targetLbs[i] = NLR::Layer::linearLowerBound( sourceLbs, sourceUbs, i );
+ targetUbs[i] = NLR::Layer::linearUpperBound( sourceLbs, sourceUbs, i );
+ }
+ for ( unsigned i = 0; i < size; ++i )
+ {
+ symbolicLowerBias[i] =
+ NLR::Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, i ); // Using lse2
+ symbolicUpperBias[i] = NLR::Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, i );
+ for ( unsigned j = 0; j < size; ++j )
+ {
+ symbolicLb[size * j + i] =
+ NLR::Layer::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, i, j );
+ symbolicUb[size * j + i] =
+ NLR::Layer::dLSEUpperbound( sourceMids, targetLbs, targetUbs, i, j );
+ symbolicLowerBias[i] -= symbolicLb[size * j + i] * sourceMids[j];
+ symbolicUpperBias[i] -= symbolicUb[size * j + i] * sourceMids[j];
+ }
+ }
+ TS_ASSERT( compareVectors( targetLbs, Vector( { 0.8668, 0.1173, 0.0159 } ) ) );
+ TS_ASSERT( compareVectors( targetUbs, Vector( { 0.8668, 0.1173, 0.0159 } ) ) );
+ TS_ASSERT( compareVectors( symbolicLb,
+ Vector( { 0.1155,
+ -0.1017,
+ -0.0138,
+ -0.1017,
+ 0.1035,
+ -0.0019,
+ -0.0138,
+ -0.0019,
+ 0.0156 } ) ) );
+ TS_ASSERT( compareVectors( symbolicUb,
+ Vector( { 0.1154,
+ -0.1017,
+ -0.0138,
+ -0.1017,
+ 0.1036,
+ -0.0019,
+ -0.0138,
+ -0.0019,
+ 0.0156 } ) ) );
+ TS_ASSERT(
+ compareVectors( symbolicLowerBias, Vector( { 0.6084, 0.3170, 0.0747 } ) ) );
+ TS_ASSERT(
+ compareVectors( symbolicUpperBias, Vector( { 0.6084, 0.3170, 0.0747 } ) ) );
+
+ // Second softmax group: x9 x11 = softmax( x4, x6 ).
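+ /*
+ The concrete target bounds asserted for both groups can be cross-checked
+ with a short Python sketch (illustrative only: softmax_concrete_bounds is
+ a hypothetical name for the interval bounds that linearLowerBound and
+ linearUpperBound are expected to produce for a softmax output):
+
+ import math
+
+ def softmax_concrete_bounds(lbs, ubs, i):
+     # Output i of a softmax is smallest when input i sits at its lower
+     # bound and every other input sits at its upper bound, and largest
+     # in the symmetric case.
+     n = range(len(lbs))
+     lo = 1.0 / (1.0 + sum(math.exp(ubs[j] - lbs[i]) for j in n if j != i))
+     hi = 1.0 / (1.0 + sum(math.exp(lbs[j] - ubs[i]) for j in n if j != i))
+     return lo, hi
+
+ print(softmax_concrete_bounds([2, 0, -2], [2, 0, -2], 0))  # ~(0.8668, 0.8668)
+ print(softmax_concrete_bounds([3, -1], [3, -1], 0))        # ~(0.9820, 0.9820)
+ */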
+ size = nlr.getLayer( 2 )->getActivationSources( 1 ).size();
+ sourceLbs = Vector( { 2.999899, -1.000103 } );
+ sourceUbs = Vector( { 3.000102, -0.9999 } );
+ sourceMids = Vector( { 3.0000005, -1.0000015 } );
+ targetLbs = Vector( size, 0 );
+ targetUbs = Vector( size, 0 );
+ symbolicLb = Vector( size * size, 0 );
+ symbolicUb = Vector( size * size, 0 );
+ symbolicLowerBias = Vector( size, 0 );
+ symbolicUpperBias = Vector( size, 0 );
+ for ( unsigned i = 0; i < size; ++i )
+ {
+ targetLbs[i] = NLR::Layer::linearLowerBound( sourceLbs, sourceUbs, i );
+ targetUbs[i] = NLR::Layer::linearUpperBound( sourceLbs, sourceUbs, i );
+ }
+ for ( unsigned i = 0; i < size; ++i )
+ {
+ symbolicLowerBias[i] =
+ NLR::Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, i ); // Using lse2
+ symbolicUpperBias[i] = NLR::Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, i );
+ for ( unsigned j = 0; j < size; ++j )
+ {
+ symbolicLb[size * j + i] =
+ NLR::Layer::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, i, j );
+ symbolicUb[size * j + i] =
+ NLR::Layer::dLSEUpperbound( sourceMids, targetLbs, targetUbs, i, j );
+ symbolicLowerBias[i] -= symbolicLb[size * j + i] * sourceMids[j];
+ symbolicUpperBias[i] -= symbolicUb[size * j + i] * sourceMids[j];
+ }
+ }
+ TS_ASSERT( compareVectors( targetLbs, Vector( { 0.9820, 0.0180 } ) ) );
+ TS_ASSERT( compareVectors( targetUbs, Vector( { 0.9820, 0.0180 } ) ) );
+ TS_ASSERT(
+ compareVectors( symbolicLb, Vector( { 0.0177, -0.0177, -0.0177, 0.0177 } ) ) );
+ TS_ASSERT(
+ compareVectors( symbolicUb, Vector( { 0.0177, -0.0177, -0.0177, 0.0177 } ) ) );
+ TS_ASSERT( compareVectors( symbolicLowerBias, Vector( { 0.9114, 0.0886 } ) ) );
+ TS_ASSERT( compareVectors( symbolicUpperBias, Vector( { 0.9114, 0.0886 } ) ) );
+
+ /*
+ Layer 2:
+
+ First softmax group: x8 x10 x12 = softmax( x3, x5, x7 ).
+0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 <= x8 <= 0.1154 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084
+ x8.lb = 0.2310 x0 + 0.0001 x1 + 0.2310 x2 + 0.4051
+ x8.ub = 0.2310 x0 + 0.0000 x1 + 0.2310 x2 + 0.4050
+ x8 range: [ 0.8668, 0.8668 ]
+
+-0.1017 x3 + 0.1035 x5 - 0.0019 x7 + 0.3170 <= x10 <= -0.1017 x3 + 0.1036 x5 - 0.0019 x7 + 0.3170
+ x10.lb = -0.2033 x0 + 0.0001 x1 - 0.2033 x2 + 0.5239
+ x10.ub = -0.2033 x0 + 0.0000 x1 - 0.2033 x2 + 0.5241
+ x10 range: [ 0.1173, 0.1173 ]
+
+-0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 <= x12 <= -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747
+ x12.lb = -0.0275 x0 + 0.0001 x1 - 0.0275 x2 + 0.0708
+ x12.ub = -0.0275 x0 + 0.0001 x1 - 0.0275 x2 + 0.0708
+ x12 range: [ 0.0159, 0.0159 ]
+
+ Second softmax group: x9 x11 = softmax( x4, x6 ).
+0.0177 x4 - 0.0177 x6 + 0.9114 <= x9 <= 0.0177 x4 - 0.0177 x6 + 0.9114
+ x9.lb = 0 x0 + 0.0354 x1 + 0.0354 x2 + 0.9114
+ x9.ub = 0 x0 + 0.0354 x1 + 0.0354 x2 + 0.9114
+ x9 range: [ 0.9820, 0.9820 ]
+
+-0.0177 x4 + 0.0177 x6 + 0.0886 <= x11 <= -0.0177 x4 + 0.0177 x6 + 0.0886
+ x11.lb = 0 x0 - 0.0354 x1 - 0.0354 x2 + 0.0886
+ x11.ub = 0 x0 - 0.0354 x1 - 0.0354 x2 + 0.0886
+ x11 range: [ 0.0180, 0.0180 ]
+
+ Layer 3:
+
+ x13 = x8 + x10 + x12
+ => x13 = ( 0.1155 - 0.1017 - 0.0138 ) x3 + ( -0.1017 + 0.1035 - 0.0019 ) x5
+ + ( -0.0138 - 0.0019 + 0.0156 ) x7 + ( 0.6084 + 0.3170 + 0.0747 )
+
+ => x13 = 0 x3 - 0.0001 x5 - 0.0001 x7 + 1.0001
+ => ( Up to rounding ) 1 <= x13 <= 1.
+ x13.lb = 1 + x13.ub = 1 + x13 range: [ 1, 1 ] + + x14 = - x8 - x10 - x12 + => x14 = - ( 0.1155 - 0.1017 - 0.0138 ) x3 - ( -0.1017 + 0.1035 - 0.0019 ) x5 + - ( -0.0138 - 0.0019 + 0.0156 ) x7 - ( 0.6084 + 0.3170 + 0.0747 ) + + => x14 = 0 x3 + 0.0001 x5 + 0.0001 x7 - 1.0001 + => ( Up to rounding ) -1 <= x14 <= -1. + x14.lb = -1 + x14.ub = -1 + x14 range: [ -1, -1 ] + + x15 = x9 + x11 + => x15 = ( 0.0177 - 0.0177 ) x4 + ( -0.0177 + 0.0177 ) x6 + ( 0.9114 + 0.0886 ) + + => x15 = 0 x4 + 0 x6 + 1 + => ( Up to rounding ) 1 <= x15 <= 1. + x15.lb = 1 + x15.ub = 1 + x15 range: [ 1, 1 ] + + x16 = - x9 - x11 + => x16 = - ( 0.0177 - 0.0177 ) x4 - ( -0.0177 + 0.0177 ) x6 - ( 0.9114 + 0.0886 ) + + => x16 = 0 x4 + 0 x6 - 1 + => ( Up to rounding ) -1 <= x16 <= -1. + x16.lb = -1 + x16.ub = -1 + x16 range: [ -1, -1 ] + */ + + List expectedBounds( { + Tightening( 3, 2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, -1, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, -2, Tightening::UB ), + Tightening( 8, 0.86681, Tightening::LB ), Tightening( 8, 0.86682, Tightening::UB ), + Tightening( 9, 0.98201, Tightening::LB ), Tightening( 9, 0.98201, Tightening::UB ), + Tightening( 10, 0.11731, Tightening::LB ), Tightening( 10, 0.11731, Tightening::UB ), + Tightening( 11, 0.017985, Tightening::LB ), Tightening( 11, 0.017986, Tightening::UB ), + Tightening( 12, 0.015875, Tightening::LB ), Tightening( 12, 0.015876, Tightening::UB ), + Tightening( 13, 1, Tightening::LB ), Tightening( 13, 1, Tightening::UB ), + Tightening( 14, -1, Tightening::LB ), Tightening( 14, -1, Tightening::UB ), + Tightening( 15, 1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, -1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (SOFTMAX): +0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 <= x8 <= 0.1154 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 +0.0177 x4 - 0.0177 x6 + 0.9114 <= x9 <= 0.0177 x4 - 0.0177 x6 + 0.9114 +-0.1017 x3 + 0.1035 x5 - 0.0019 x7 + 0.3170 <= x10 <= -0.1017 x3 + 0.1036 x5 - 0.0019 x7 + 0.3170 +-0.0177 x4 + 0.0177 x6 + 0.0886 <= x11 <= -0.0177 x4 + 0.0177 x6 + 0.0886 +-0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 <= x12 <= -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 3: + x13 <= x13 <= x13 + x14 <= x14 <= x14 + x15 <= x15 <= x15 + x16 <= x16 <= x16 + + Layer 2: + Using x13 = x8 + x10 + x12, x14 = -x8 - x10 - x12, x15 = x9 + x11, x16 = -x9 - x11: + x8 + x10 + x12 <= x13 <= x8 + x10 + x12 + -x8 - x10 - x12 <= x14 <= -x8 - x10 - x12 + x9 + x11 <= x15 <= x9 + x11 + -x9 - x11 <= x16 <= -x9 - x11 + + Layer 1: + Using +0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 <= x8 <= 0.1154 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 +0.0177 x4 - 0.0177 x6 + 0.9114 <= x9 <= 0.0177 x4 - 0.0177 x6 + 0.9114 +-0.1017 x3 + 0.1035 x5 - 0.0019 x7 + 0.3170 <= x10 <= -0.1017 x3 + 0.1036 x5 - 0.0019 x7 + 0.3170 +-0.0177 x4 + 0.0177 x6 + 0.0886 <= x11 <= -0.0177 x4 + 0.0177 x6 + 0.0886 +-0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 <= x12 <= -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 + 1 <= x13 <= 1 + 
-1 <= x14 <= -1
+ 1 <= x15 <= 1
+ -1 <= x16 <= -1
+
+ Layer 0:
+ 1 <= x13 <= 1
+ -1 <= x14 <= -1
+ 1 <= x15 <= 1
+ -1 <= x16 <= -1
+ */
+ comparePredecessorLayerSymbolicBounds(
+ nlr,
+ 2,
+ Vector( { 0.1155, 0.0000, -0.1017, 0.0000, -0.0138, 0.0000, 0.0177,
+ 0.0000, -0.0177, 0.0000, -0.1017, 0.0000, 0.1035, 0.0000,
+ -0.0019, 0.0000, -0.0177, 0.0000, 0.0177, 0.0000, -0.0138,
+ 0.0000, -0.0019, 0.0000, 0.0156 } ),
+ Vector( { 0.1155, 0.0000, -0.1017, 0.0000, -0.0138, 0.0000, 0.0177,
+ 0.0000, -0.0177, 0.0000, -0.1017, 0.0000, 0.1035, 0.0000,
+ -0.0019, 0.0000, -0.0177, 0.0000, 0.0177, 0.0000, -0.0138,
+ 0.0000, -0.0019, 0.0000, 0.0156 } ),
+ Vector( { 0.6084, 0.9114, 0.3170, 0.0886, 0.0747 } ),
+ Vector( { 0.6084, 0.9114, 0.3170, 0.0886, 0.0747 } ) );
+
+ compareOutputLayerSymbolicBounds(
+ nlr,
+ 3,
+ Vector( { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 } ),
+ Vector( { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 } ),
+ Vector( { 0, 0, 0, 0 } ),
+ Vector( { 0, 0, 0, 0 } ) );
+ compareOutputLayerSymbolicBounds(
+ nlr,
+ 2,
+ Vector( { 1, -1, 0, 0, 0, 0, 1, -1, 1, -1, 0, 0, 0, 0, 1, -1, 1, -1, 0, 0 } ),
+ Vector( { 1, -1, 0, 0, 0, 0, 1, -1, 1, -1, 0, 0, 0, 0, 1, -1, 1, -1, 0, 0 } ),
+ Vector( { 0, 0, 0, 0 } ),
+ Vector( { 0, 0, 0, 0 } ) );
+ compareOutputLayerSymbolicBounds( nlr,
+ 1,
+ Vector( 20, 0 ),
+ Vector( 20, 0 ),
+ Vector( { 1, -1, 1, -1 } ),
+ Vector( { 1, -1, 1, -1 } ) );
+ compareOutputLayerSymbolicBounds( nlr,
+ 0,
+ Vector( 12, 0 ),
+ Vector( 12, 0 ),
+ Vector( { 1, -1, 1, -1 } ),
+ Vector( { 1, -1, 1, -1 } ) );
+ }
+
+ void test_parameterised_symbolic_bound_maps_bilinear()
+ {
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkSBTBilinear( nlr, tableau );
+
+ tableau.setLowerBound( 0, 1 );
+ tableau.setUpperBound( 0, 2 );
+ tableau.setLowerBound( 1, -2 );
+ tableau.setUpperBound( 1, 1 );
+
+ unsigned paramCount = nlr.getNumberOfParameters();
+ Vector coeffs( paramCount, 0.5 );
+
+ // Invoke parameterised initializeSymbolicBoundsMaps
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) );
+
+ /*
+ Input ranges:
+
+ x0: [1, 2]
+ x1: [-2, 1]
+
+ Layers 1, 2:
+
+ x2 = x0 - 2x1
+ x2.lb = x0 - 2x1 : [-1, 6]
+ x2.ub = x0 - 2x1 : [-1, 6]
+
+ x3 = x0 + x1
+ x3.lb = x0 + x1 : [-1, 3]
+ x3.ub = x0 + x1 : [-1, 3]
+
+ Using custom coefficients with alpha = { 0.5, 0.5 }.
+ Coefficients for bilinear layer:
+ Lower bound:
+ alpha_l = 0.5 x3.lb + ( 1 - 0.5 ) x3.ub = 0.5 * -1 + 0.5 * 3 = 1
+ beta_l = 0.5 x2.lb + ( 1 - 0.5 ) x2.ub = 0.5 * -1 + 0.5 * 6 = 2.5
+ gamma_l = -0.5 x2.lb x3.lb - ( 1 - 0.5 ) x2.ub x3.ub = -0.5 * -1 * -1 - 0.5 * 6 * 3 =
+ -9.5.
+
+ Upper bound:
+ alpha_u = 0.5 x3.ub + ( 1 - 0.5 ) x3.lb = 0.5 * 3 + 0.5 * -1 = 1
+ beta_u = 0.5 x2.lb + ( 1 - 0.5 ) x2.ub = 0.5 * -1 + 0.5 * 6 = 2.5
+ gamma_u = -0.5 x2.lb x3.ub - ( 1 - 0.5 ) x2.ub x3.lb = -0.5 * -1 * 3 - 0.5 * 6 * -1
+ = 4.5.
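+
+ The same coefficients can be reproduced with a small Python sketch
+ (illustrative only: bilinear_relaxation is a hypothetical helper; it
+ forms the convex combination, with weight alpha, of the two McCormick
+ lower planes and the two McCormick upper planes for x4 = x2 * x3):
+
+ def bilinear_relaxation(x2_lb, x2_ub, x3_lb, x3_ub, alpha=0.5):
+     a_l = alpha * x3_lb + (1 - alpha) * x3_ub
+     b_l = alpha * x2_lb + (1 - alpha) * x2_ub
+     g_l = -alpha * x2_lb * x3_lb - (1 - alpha) * x2_ub * x3_ub
+     a_u = alpha * x3_ub + (1 - alpha) * x3_lb
+     b_u = alpha * x2_lb + (1 - alpha) * x2_ub
+     g_u = -alpha * x2_lb * x3_ub - (1 - alpha) * x2_ub * x3_lb
+     return (a_l, b_l, g_l), (a_u, b_u, g_u)
+
+ print(bilinear_relaxation(-1, 6, -1, 3))
+ # ((1.0, 2.5, -9.5), (1.0, 2.5, 4.5))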
+
+ S = { x2.lb x3.lb, x2.ub x3.lb, x2.lb x3.ub, x2.ub x3.ub } = { 1, -6, -3, 18 }
+ min S = -6 <= x4 <= max S = 18
+ x2 + 2.5 x3 - 9.5 <= x4 <= x2 + 2.5 x3 + 4.5
+ x4.lb = 1 ( x0 - 2x1 ) + 2.5 ( x0 + x1 ) - 9.5 = 3.5 x0 + 0.5 x1 - 9.5 : [-7, -2]
+ x4.ub = 1 ( x0 - 2x1 ) + 2.5 ( x0 + x1 ) + 4.5 = 3.5 x0 + 0.5 x1 + 4.5 : [7, 12]
+ x4 range: [-6, 18]
+
+ Layer 3:
+
+ x5 = -x4 : [-18, 6]
+ => -x2 - 2.5 x3 - 4.5 <= x5 <= -x2 - 2.5 x3 + 9.5
+ x5.lb = -1 ( 3.5 x0 + 0.5 x1 + 4.5 ) = -3.5 x0 - 0.5 x1 - 4.5 : [-12, -7]
+ x5.ub = -1 ( 3.5 x0 + 0.5 x1 - 9.5 ) = -3.5 x0 - 0.5 x1 + 9.5 : [2, 7]
+ x5 range: [-12, 6]
+ */
+
+ List expectedBounds( { Tightening( 2, -1, Tightening::LB ),
+ Tightening( 2, 6, Tightening::UB ),
+ Tightening( 3, -1, Tightening::LB ),
+ Tightening( 3, 3, Tightening::UB ),
+ Tightening( 4, -6, Tightening::LB ),
+ Tightening( 4, 18, Tightening::UB ),
+ Tightening( 5, -12, Tightening::LB ),
+ Tightening( 5, 6, Tightening::UB ) } );
+
+ List bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+ /*
+ Symbolic bounds of every activation layer in terms of predecessor:
+
+ Layer 2 (BILINEAR):
+ x2 + 2.5 x3 - 9.5 <= x4 <= x2 + 2.5 x3 + 4.5
+
+ Symbolic bounds of output layer in terms of every layer (backsubstitution):
+
+ Layer 3:
+ x5 <= x5 <= x5
+
+ Layer 2:
+ Using x5 = -x4:
+ -x4 <= x5 <= -x4
+
+ Layer 1:
+ Using x2 + 2.5 x3 - 9.5 <= x4 <= x2 + 2.5 x3 + 4.5:
+ -x2 - 2.5 x3 - 4.5 <= x5 <= -x2 - 2.5 x3 + 9.5
+
+ Layer 0:
+ Using x2 = x0 - 2x1, x3 = x0 + x1:
+ -3.5 x0 - 0.5 x1 - 4.5 <= x5 <= -3.5 x0 - 0.5 x1 + 9.5
+ */
+
+ comparePredecessorLayerSymbolicBounds( nlr,
+ 2,
+ Vector( { 1, 2.5 } ),
+ Vector( { 1, 2.5 } ),
+ Vector( { -9.5 } ),
+ Vector( { 4.5 } ) );
+
+ compareOutputLayerSymbolicBounds( nlr,
+ 3,
+ Vector( { 1 } ),
+ Vector( { 1 } ),
+ Vector( { 0 } ),
+ Vector( { 0 } ) );
+ compareOutputLayerSymbolicBounds( nlr,
+ 2,
+ Vector( { -1 } ),
+ Vector( { -1 } ),
+ Vector( { 0 } ),
+ Vector( { 0 } ) );
+ compareOutputLayerSymbolicBounds( nlr,
+ 1,
+ Vector( { -1, -2.5 } ),
+ Vector( { -1, -2.5 } ),
+ Vector( { -4.5 } ),
+ Vector( { 9.5 } ) );
+ compareOutputLayerSymbolicBounds( nlr,
+ 0,
+ Vector( { -3.5, -0.5 } ),
+ Vector( { -3.5, -0.5 } ),
+ Vector( { -4.5 } ),
+ Vector( { 9.5 } ) );
+ }
+
+ bool boundsEqual( const List &bounds, const List &expectedBounds )
+ {
+ if ( bounds.size() != expectedBounds.size() )
+ return false;
+
+ bool allFound = true;
+ for ( const auto &bound : bounds )
+ {
+ bool currentFound = false;
+ for ( const auto &expectedBound : expectedBounds )
+ {
+ currentFound |=
+ ( bound._type == expectedBound._type &&
+ bound._variable == expectedBound._variable &&
+ FloatUtils::areEqual( bound._value, expectedBound._value, 0.0001 ) );
+ }
+ allFound &= currentFound;
+ }
+ return allFound;
+ }
+
+ void updateTableau( MockTableau &tableau, List &tightenings )
+ {
+ for ( const auto &tightening : tightenings )
+ {
+ if ( tightening._type == Tightening::LB )
+ {
+ tableau.setLowerBound( tightening._variable, tightening._value );
+ }
+
+ if ( tightening._type == Tightening::UB )
+ {
+ tableau.setUpperBound( tightening._variable, tightening._value );
+ }
+ }
+ }
+
+ void comparePredecessorLayerSymbolicBounds( NLR::NetworkLevelReasoner &nlr,
+ unsigned layerIndex,
+ const Vector &symbolicLb,
+ const Vector &symbolicUb,
+ const Vector &symbolicLowerBias,
+ const Vector &symbolicUpperBias )
+ {
+ Vector predecessorLayerSymbolicLb;
+ Vector predecessorLayerSymbolicUb;
+ Vector predecessorLayerSymbolicLowerBias;
+ Vector predecessorLayerSymbolicUpperBias;
+ TS_ASSERT_THROWS_NOTHING( predecessorLayerSymbolicLb =
+ nlr.getSymbolicLbInTermsOfPredecessor( layerIndex ) );
+ TS_ASSERT_THROWS_NOTHING( predecessorLayerSymbolicUb =
+ nlr.getSymbolicUbInTermsOfPredecessor( layerIndex ) );
+ TS_ASSERT_THROWS_NOTHING( predecessorLayerSymbolicLowerBias =
+ nlr.getSymbolicLowerBiasInTermsOfPredecessor( layerIndex ) );
+ TS_ASSERT_THROWS_NOTHING( predecessorLayerSymbolicUpperBias =
+ nlr.getSymbolicUpperBiasInTermsOfPredecessor( layerIndex ) );
+ TS_ASSERT( compareVectors( predecessorLayerSymbolicLb, symbolicLb ) );
+ TS_ASSERT( compareVectors( predecessorLayerSymbolicUb, symbolicUb ) );
+ TS_ASSERT( compareVectors( predecessorLayerSymbolicLowerBias, symbolicLowerBias ) );
+ TS_ASSERT( compareVectors( predecessorLayerSymbolicUpperBias, symbolicUpperBias ) );
+ }
+
+ void compareOutputLayerSymbolicBounds( NLR::NetworkLevelReasoner &nlr,
+ unsigned layerIndex,
+ const Vector &symbolicLb,
+ const Vector &symbolicUb,
+ const Vector &symbolicLowerBias,
+ const Vector &symbolicUpperBias )
+ {
+ Vector outputLayerSymbolicLb;
+ Vector outputLayerSymbolicUb;
+ Vector outputLayerSymbolicLowerBias;
+ Vector outputLayerSymbolicUpperBias;
+ TS_ASSERT_THROWS_NOTHING( outputLayerSymbolicLb =
+ nlr.getOutputLayerSymbolicLb( layerIndex ) );
+ TS_ASSERT_THROWS_NOTHING( outputLayerSymbolicUb =
+ nlr.getOutputLayerSymbolicUb( layerIndex ) );
+ TS_ASSERT_THROWS_NOTHING( outputLayerSymbolicLowerBias =
+ nlr.getOutputLayerSymbolicLowerBias( layerIndex ) );
+ TS_ASSERT_THROWS_NOTHING( outputLayerSymbolicUpperBias =
+ nlr.getOutputLayerSymbolicUpperBias( layerIndex ) );
+ TS_ASSERT( compareVectors( outputLayerSymbolicLb, symbolicLb ) );
+ TS_ASSERT( compareVectors( outputLayerSymbolicUb, symbolicUb ) );
+ TS_ASSERT( compareVectors( outputLayerSymbolicLowerBias, symbolicLowerBias ) );
+ TS_ASSERT( compareVectors( outputLayerSymbolicUpperBias, symbolicUpperBias ) );
+ }
+
+ bool compareVectors( const Vector &vectorA, const Vector &vectorB )
+ {
+ if ( vectorA.size() != vectorB.size() )
+ return false;
+
+ for ( unsigned i = 0; i < vectorA.size(); ++i )
+ {
+ if ( !FloatUtils::areEqual( vectorA[i], vectorB[i], 0.0001 ) )
+ return false;
+ }
+
+ return true;
+ }
+};

From 6f4b37e3ee1b50829a38b8db684aff5f61a93932 Mon Sep 17 00:00:00 2001
From: ido-shm-uel
Date: Sun, 24 Aug 2025 21:13:38 +0300
Subject: [PATCH 69/81] Changed variable names for initializeSymbolicBoundsMaps
 and related functions in the DeepPoly module, restored Test_PMNR.h, removed
 the Gurobi guard for Test_PMNRSelection.h, and resumed implementing
 selectConstraints/generatePolygonalTightenings and related functions.
---
 src/input_parsers/OnnxParser.cpp | 35 +
 src/input_parsers/OnnxParser.h | 1 +
 src/nlr/CMakeLists.txt | 3 +-
 src/nlr/DeepPolyAbsoluteValueElement.cpp | 12 +-
 src/nlr/DeepPolyAnalysis.cpp | 70 +-
 src/nlr/DeepPolyAnalysis.h | 40 +-
 src/nlr/DeepPolyBilinearElement.cpp | 14 +-
 src/nlr/DeepPolyElement.cpp | 55 +-
 src/nlr/DeepPolyElement.h | 41 +-
 src/nlr/DeepPolyLeakyReLUElement.cpp | 12 +-
 src/nlr/DeepPolyMaxPoolElement.cpp | 22 +-
 src/nlr/DeepPolyReLUElement.cpp | 12 +-
 src/nlr/DeepPolyRoundElement.cpp | 12 +-
 src/nlr/DeepPolySigmoidElement.cpp | 12 +-
 src/nlr/DeepPolySignElement.cpp | 12 +-
 src/nlr/DeepPolySoftmaxElement.cpp | 10 +-
 src/nlr/DeepPolyWeightedSumElement.cpp | 4 +-
 src/nlr/NetworkLevelReasoner.cpp | 551 +-
 src/nlr/NetworkLevelReasoner.h | 53 +-
 src/nlr/tests/Test_PMNR.h | 11387 +++++++++++++++++++++
 src/nlr/tests/Test_PMNRSelection.h | 3086 +++---
 21 files changed, 13386 insertions(+), 2058 deletions(-)
 create mode 100644 src/nlr/tests/Test_PMNR.h

diff --git a/src/input_parsers/OnnxParser.cpp b/src/input_parsers/OnnxParser.cpp
index 758bab6232..34454d94a3 100644
--- a/src/input_parsers/OnnxParser.cpp
+++ b/src/input_parsers/OnnxParser.cpp
@@ -957,6 +957,10 @@ void OnnxParser::makeMarabouEquations( onnx::NodeProto &node, bool makeEquations
 {
 tanhEquations( node, makeEquations );
 }
+ else if ( strcmp( nodeType, "Sign" ) == 0 )
+ {
+ signEquations( node, makeEquations );
+ }
 else
 {
 unsupportedError( node );
@@ -1816,6 +1820,37 @@ void OnnxParser::reluEquations( onnx::NodeProto &node, bool makeEquations )
 }
 }

+/**
+ * @brief Function to generate equations corresponding to pointwise Sign
+ * Implements https://github.com/onnx/onnx/blob/main/docs/Operators.md#Sign, up to value at 0
+ *
+ * @param node ONNX node representing the Sign operation
+ * @param makeEquations True if we need to create new variables and add new Sign constraints
+ */
+void OnnxParser::signEquations( onnx::NodeProto &node, bool makeEquations )
+{
+ String outputNodeName = node.output()[0];
+ String inputNodeName = node.input()[0];
+
+ _shapeMap[outputNodeName] = _shapeMap[inputNodeName];
+ if ( !makeEquations )
+ return;
+
+ // Get variables
+ Vector inputVars = _varMap[inputNodeName];
+ Vector outputVars = makeNodeVariables( outputNodeName, false );
+ ASSERT( inputVars.size() == outputVars.size() );
+
+ // Generate equations
+ for ( PackedTensorIndices i = 0; i < inputVars.size(); i++ )
+ {
+ int inputVar = inputVars[i];
+ int outputVar = outputVars[i];
+ _query.addSignConstraint( inputVar, outputVar );
+ }
+}
+
+
 /**
 * @brief Function to generate equations corresponding to leaky pointwise Relu
 * Implements https://github.com/onnx/onnx/blob/main/docs/Operators.md#LeakyRelu
diff --git a/src/input_parsers/OnnxParser.h b/src/input_parsers/OnnxParser.h
index 2b316a2004..66d05866e2 100644
--- a/src/input_parsers/OnnxParser.h
+++ b/src/input_parsers/OnnxParser.h
@@ -106,6 +106,7 @@ class OnnxParser
 double coefficient2 );
 void matMulEquations( onnx::NodeProto &node, bool makeEquations );
 void reluEquations( onnx::NodeProto &node, bool makeEquations );
+ void signEquations( onnx::NodeProto &node, bool makeEquations );
 void leakyReluEquations( onnx::NodeProto &node, bool makeEquations );
 void sigmoidEquations( onnx::NodeProto &node, bool makeEquations );
 void tanhEquations( onnx::NodeProto &node, bool makeEquations );
diff --git a/src/nlr/CMakeLists.txt b/src/nlr/CMakeLists.txt
index b0bac42cc6..ef89c9492d 100644
--- a/src/nlr/CMakeLists.txt
+++ b/src/nlr/CMakeLists.txt
@@ -18,10 +18,11 @@
network_level_reasoner_add_unit_test(DeepPolyAnalysis) network_level_reasoner_add_unit_test(NetworkLevelReasoner) network_level_reasoner_add_unit_test(WsLayerElimination) network_level_reasoner_add_unit_test(ParallelSolver) +network_level_reasoner_add_unit_test(PMNRSelection) if (${ENABLE_GUROBI}) network_level_reasoner_add_unit_test(LPRelaxation) - network_level_reasoner_add_unit_test(PMNRSelection) + network_level_reasoner_add_unit_test(PMNR) endif() if (${BUILD_PYTHON}) diff --git a/src/nlr/DeepPolyAbsoluteValueElement.cpp b/src/nlr/DeepPolyAbsoluteValueElement.cpp index 7ad5fc4a6b..7d26b37354 100644 --- a/src/nlr/DeepPolyAbsoluteValueElement.cpp +++ b/src/nlr/DeepPolyAbsoluteValueElement.cpp @@ -95,7 +95,7 @@ void DeepPolyAbsoluteValueElement::execute( log( Stringf( "Neuron%u LB: %f, UB: %f", i, _lb[i], _ub[i] ) ); } - if ( _storeSymbolicBoundsInTermsOfPredecessor ) + if ( _storePredecessorSymbolicBounds ) { storePredecessorSymbolicBounds(); } @@ -108,12 +108,10 @@ void DeepPolyAbsoluteValueElement::storePredecessorSymbolicBounds() for ( unsigned i = 0; i < _size; ++i ) { NeuronIndex sourceIndex = *( _layer->getActivationSources( i ).begin() ); - ( *_symbolicLbInTermsOfPredecessor )[_layerIndex][_size * sourceIndex._neuron + i] = - _symbolicLb[i]; - ( *_symbolicUbInTermsOfPredecessor )[_layerIndex][_size * sourceIndex._neuron + i] = - _symbolicUb[i]; - ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex][i] = _symbolicLowerBias[i]; - ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex][i] = _symbolicUpperBias[i]; + ( *_predecessorSymbolicLb )[_layerIndex][_size * sourceIndex._neuron + i] = _symbolicLb[i]; + ( *_predecessorSymbolicUb )[_layerIndex][_size * sourceIndex._neuron + i] = _symbolicUb[i]; + ( *_predecessorSymbolicLowerBias )[_layerIndex][i] = _symbolicLowerBias[i]; + ( *_predecessorSymbolicUpperBias )[_layerIndex][i] = _symbolicUpperBias[i]; } } diff --git a/src/nlr/DeepPolyAnalysis.cpp b/src/nlr/DeepPolyAnalysis.cpp index 7f94de75a8..0ea2cd67d5 100644 --- a/src/nlr/DeepPolyAnalysis.cpp +++ b/src/nlr/DeepPolyAnalysis.cpp @@ -39,23 +39,22 @@ namespace NLR { -DeepPolyAnalysis::DeepPolyAnalysis( - LayerOwner *layerOwner, - bool storeOutputLayerSymbolicBounds, - bool storeSymbolicBoundsInTermsOfPredecessor, - bool useParameterisedSBT, - Map> *layerIndicesToParameters, - Map> *outputLayerSymbolicLb, - Map> *outputLayerSymbolicUb, - Map> *outputLayerSymbolicLowerBias, - Map> *outputLayerSymbolicUpperBias, - Map> *symbolicLbInTermsOfPredecessor, - Map> *symbolicUbInTermsOfPredecessor, - Map> *symbolicLowerBiasInTermsOfPredecessor, - Map> *symbolicUpperBiasInTermsOfPredecessor ) +DeepPolyAnalysis::DeepPolyAnalysis( LayerOwner *layerOwner, + bool storeOutputSymbolicBounds, + bool storePredecessorSymbolicBounds, + bool useParameterisedSBT, + Map> *layerIndicesToParameters, + Map> *outputSymbolicLb, + Map> *outputSymbolicUb, + Map> *outputSymbolicLowerBias, + Map> *outputSymbolicUpperBias, + Map> *predecessorSymbolicLb, + Map> *predecessorSymbolicUb, + Map> *predecessorSymbolicLowerBias, + Map> *predecessorSymbolicUpperBias ) : _layerOwner( layerOwner ) - , _storeOutputLayerSymbolicBounds( storeOutputLayerSymbolicBounds ) - , _storeSymbolicBoundsInTermsOfPredecessor( storeSymbolicBoundsInTermsOfPredecessor ) + , _storeOutputSymbolicBounds( storeOutputSymbolicBounds ) + , _storePredecessorSymbolicBounds( storePredecessorSymbolicBounds ) , _useParameterisedSBT( useParameterisedSBT ) , _layerIndicesToParameters( layerIndicesToParameters ) , _work1SymbolicLb( NULL ) @@ -64,14 +63,14 
@@ DeepPolyAnalysis::DeepPolyAnalysis( , _work2SymbolicUb( NULL ) , _workSymbolicLowerBias( NULL ) , _workSymbolicUpperBias( NULL ) - , _outputLayerSymbolicLb( outputLayerSymbolicLb ) - , _outputLayerSymbolicUb( outputLayerSymbolicUb ) - , _outputLayerSymbolicLowerBias( outputLayerSymbolicLowerBias ) - , _outputLayerSymbolicUpperBias( outputLayerSymbolicUpperBias ) - , _symbolicLbInTermsOfPredecessor( symbolicLbInTermsOfPredecessor ) - , _symbolicUbInTermsOfPredecessor( symbolicUbInTermsOfPredecessor ) - , _symbolicLowerBiasInTermsOfPredecessor( symbolicLowerBiasInTermsOfPredecessor ) - , _symbolicUpperBiasInTermsOfPredecessor( symbolicUpperBiasInTermsOfPredecessor ) + , _outputSymbolicLb( outputSymbolicLb ) + , _outputSymbolicUb( outputSymbolicUb ) + , _outputSymbolicLowerBias( outputSymbolicLowerBias ) + , _outputSymbolicUpperBias( outputSymbolicUpperBias ) + , _predecessorSymbolicLb( predecessorSymbolicLb ) + , _predecessorSymbolicUb( predecessorSymbolicUb ) + , _predecessorSymbolicLowerBias( predecessorSymbolicLowerBias ) + , _predecessorSymbolicUpperBias( predecessorSymbolicUpperBias ) { const Map &layers = _layerOwner->getLayerIndexToLayer(); // Get the maximal layer size @@ -173,7 +172,7 @@ void DeepPolyAnalysis::run() { if ( layer->neuronEliminated( j ) ) continue; - if ( _storeOutputLayerSymbolicBounds && index == _layerOwner->getNumberOfLayers() - 1 ) + if ( _storeOutputSymbolicBounds && index == _layerOwner->getNumberOfLayers() - 1 ) continue; double lb = deepPolyElement->getLowerBound( j ); if ( layer->getLb( j ) < lb ) @@ -269,22 +268,21 @@ DeepPolyElement *DeepPolyAnalysis::createDeepPolyElement( Layer *layer ) Layer *outputLayer = _layerIndexToLayer[_layerOwner->getNumberOfLayers() - 1]; unsigned outputLayerSize = outputLayer->getSize(); deepPolyElement->setOutputLayerSize( outputLayerSize ); - deepPolyElement->setStoreSymbolicBoundsInTermsOfPredecessor( - _storeSymbolicBoundsInTermsOfPredecessor ); + deepPolyElement->setStorePredecessorSymbolicBounds( _storePredecessorSymbolicBounds ); if ( layer->getLayerIndex() == _layerOwner->getNumberOfLayers() - 1 ) { - deepPolyElement->setStoreOutputLayerSymbolicBounds( _storeOutputLayerSymbolicBounds ); + deepPolyElement->setStoreOutputSymbolicBounds( _storeOutputSymbolicBounds ); } deepPolyElement->setUseParameterisedSBT( _useParameterisedSBT ); deepPolyElement->setLayerIndicesToParameters( _layerIndicesToParameters ); - deepPolyElement->setSymbolicBoundsMemory( _outputLayerSymbolicLb, - _outputLayerSymbolicUb, - _outputLayerSymbolicLowerBias, - _outputLayerSymbolicUpperBias, - _symbolicLbInTermsOfPredecessor, - _symbolicUbInTermsOfPredecessor, - _symbolicLowerBiasInTermsOfPredecessor, - _symbolicUpperBiasInTermsOfPredecessor ); + deepPolyElement->setSymbolicBoundsMemory( _outputSymbolicLb, + _outputSymbolicUb, + _outputSymbolicLowerBias, + _outputSymbolicUpperBias, + _predecessorSymbolicLb, + _predecessorSymbolicUb, + _predecessorSymbolicLowerBias, + _predecessorSymbolicUpperBias ); return deepPolyElement; } diff --git a/src/nlr/DeepPolyAnalysis.h b/src/nlr/DeepPolyAnalysis.h index 4e6ecdc791..158506dd25 100644 --- a/src/nlr/DeepPolyAnalysis.h +++ b/src/nlr/DeepPolyAnalysis.h @@ -29,26 +29,26 @@ class DeepPolyAnalysis { public: DeepPolyAnalysis( LayerOwner *layerOwner, - bool storeOutputLayerSymbolicBounds = false, - bool storeSymbolicBoundsInTermsOfPredecessor = false, + bool storeOutputSymbolicBounds = false, + bool storePredecessorSymbolicBounds = false, bool useParameterisedSBT = false, Map> *layerIndicesToParameters = NULL, - 
Map> *outputLayerSymbolicLb = NULL, - Map> *outputLayerSymbolicUb = NULL, - Map> *outputLayerSymbolicLowerBias = NULL, - Map> *outputLayerSymbolicUpperBias = NULL, - Map> *symbolicLbInTermsOfPredecessor = NULL, - Map> *symbolicUbInTermsOfPredecessor = NULL, - Map> *symbolicLowerBiasInTermsOfPredecessor = NULL, - Map> *symbolicUpperBiasInTermsOfPredecessor = NULL ); + Map> *outputSymbolicLb = NULL, + Map> *outputSymbolicUb = NULL, + Map> *outputSymbolicLowerBias = NULL, + Map> *outputSymbolicUpperBias = NULL, + Map> *predecessorSymbolicLb = NULL, + Map> *predecessorSymbolicUb = NULL, + Map> *predecessorSymbolicLowerBias = NULL, + Map> *predecessorSymbolicUpperBias = NULL ); ~DeepPolyAnalysis(); void run(); private: LayerOwner *_layerOwner; - bool _storeOutputLayerSymbolicBounds; - bool _storeSymbolicBoundsInTermsOfPredecessor; + bool _storeOutputSymbolicBounds; + bool _storePredecessorSymbolicBounds; bool _useParameterisedSBT; Map> *_layerIndicesToParameters; @@ -67,15 +67,15 @@ class DeepPolyAnalysis double *_workSymbolicLowerBias; double *_workSymbolicUpperBias; - Map> *_outputLayerSymbolicLb; - Map> *_outputLayerSymbolicUb; - Map> *_outputLayerSymbolicLowerBias; - Map> *_outputLayerSymbolicUpperBias; + Map> *_outputSymbolicLb; + Map> *_outputSymbolicUb; + Map> *_outputSymbolicLowerBias; + Map> *_outputSymbolicUpperBias; - Map> *_symbolicLbInTermsOfPredecessor; - Map> *_symbolicUbInTermsOfPredecessor; - Map> *_symbolicLowerBiasInTermsOfPredecessor; - Map> *_symbolicUpperBiasInTermsOfPredecessor; + Map> *_predecessorSymbolicLb; + Map> *_predecessorSymbolicUb; + Map> *_predecessorSymbolicLowerBias; + Map> *_predecessorSymbolicUpperBias; unsigned _maxLayerSize; diff --git a/src/nlr/DeepPolyBilinearElement.cpp b/src/nlr/DeepPolyBilinearElement.cpp index 67505a0c17..a937be7012 100644 --- a/src/nlr/DeepPolyBilinearElement.cpp +++ b/src/nlr/DeepPolyBilinearElement.cpp @@ -134,7 +134,7 @@ void DeepPolyBilinearElement::execute( } } - if ( _storeSymbolicBoundsInTermsOfPredecessor ) + if ( _storePredecessorSymbolicBounds ) { storePredecessorSymbolicBounds(); } @@ -155,16 +155,16 @@ void DeepPolyBilinearElement::storePredecessorSymbolicBounds() for ( unsigned i = 0; i < _size; ++i ) { List sources = _layer->getActivationSources( i ); - ( *_symbolicLbInTermsOfPredecessor )[_layerIndex][_size * sources.begin()->_neuron + i] = + ( *_predecessorSymbolicLb )[_layerIndex][_size * sources.begin()->_neuron + i] = _symbolicLbA[i]; - ( *_symbolicUbInTermsOfPredecessor )[_layerIndex][_size * sources.begin()->_neuron + i] = + ( *_predecessorSymbolicUb )[_layerIndex][_size * sources.begin()->_neuron + i] = _symbolicUbA[i]; - ( *_symbolicLbInTermsOfPredecessor )[_layerIndex][_size * sources.back()._neuron + i] = + ( *_predecessorSymbolicLb )[_layerIndex][_size * sources.back()._neuron + i] = _symbolicLbB[i]; - ( *_symbolicUbInTermsOfPredecessor )[_layerIndex][_size * sources.back()._neuron + i] = + ( *_predecessorSymbolicUb )[_layerIndex][_size * sources.back()._neuron + i] = _symbolicUbB[i]; - ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex][i] = _symbolicLowerBias[i]; - ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex][i] = _symbolicUpperBias[i]; + ( *_predecessorSymbolicLowerBias )[_layerIndex][i] = _symbolicLowerBias[i]; + ( *_predecessorSymbolicUpperBias )[_layerIndex][i] = _symbolicUpperBias[i]; } } diff --git a/src/nlr/DeepPolyElement.cpp b/src/nlr/DeepPolyElement.cpp index 3e7f1c9dc2..33ec43cfc9 100644 --- a/src/nlr/DeepPolyElement.cpp +++ b/src/nlr/DeepPolyElement.cpp @@ -21,8 +21,8 @@ 
DeepPolyElement::DeepPolyElement() : _layer( NULL ) , _size( 0 ) , _layerIndex( 0 ) - , _storeOutputLayerSymbolicBounds( false ) - , _storeSymbolicBoundsInTermsOfPredecessor( false ) + , _storeOutputSymbolicBounds( false ) + , _storePredecessorSymbolicBounds( false ) , _useParameterisedSBT( false ) , _layerIndicesToParameters( NULL ) , _outputLayerSize( 0 ) @@ -97,15 +97,14 @@ double DeepPolyElement::getUpperBound( unsigned index ) const return _ub[index]; } -void DeepPolyElement::setStoreOutputLayerSymbolicBounds( bool storeOutputLayerSymbolicBounds ) +void DeepPolyElement::setStoreOutputSymbolicBounds( bool storeOutputSymbolicBounds ) { - _storeOutputLayerSymbolicBounds = storeOutputLayerSymbolicBounds; + _storeOutputSymbolicBounds = storeOutputSymbolicBounds; } -void DeepPolyElement::setStoreSymbolicBoundsInTermsOfPredecessor( - bool storeSymbolicBoundsInTermsOfPredecessor ) +void DeepPolyElement::setStorePredecessorSymbolicBounds( bool storePredecessorSymbolicBounds ) { - _storeSymbolicBoundsInTermsOfPredecessor = storeSymbolicBoundsInTermsOfPredecessor; + _storePredecessorSymbolicBounds = storePredecessorSymbolicBounds; } void DeepPolyElement::setUseParameterisedSBT( bool useParameterisedSBT ) @@ -189,23 +188,23 @@ void DeepPolyElement::setWorkingMemory( double *work1SymbolicLb, } void DeepPolyElement::setSymbolicBoundsMemory( - Map> *outputLayerSymbolicLb, - Map> *outputLayerSymbolicUb, - Map> *outputLayerSymbolicLowerBias, - Map> *outputLayerSymbolicUpperBias, - Map> *symbolicLbInTermsOfPredecessor, - Map> *symbolicUbInTermsOfPredecessor, - Map> *symbolicLowerBiasInTermsOfPredecessor, - Map> *symbolicUpperBiasInTermsOfPredecessor ) + Map> *outputSymbolicLb, + Map> *outputSymbolicUb, + Map> *outputSymbolicLowerBias, + Map> *outputSymbolicUpperBias, + Map> *predecessorSymbolicLb, + Map> *predecessorSymbolicUb, + Map> *predecessorSymbolicLowerBias, + Map> *predecessorSymbolicUpperBias ) { - _outputLayerSymbolicLb = outputLayerSymbolicLb; - _outputLayerSymbolicUb = outputLayerSymbolicUb; - _outputLayerSymbolicLowerBias = outputLayerSymbolicLowerBias; - _outputLayerSymbolicUpperBias = outputLayerSymbolicUpperBias; - _symbolicLbInTermsOfPredecessor = symbolicLbInTermsOfPredecessor; - _symbolicUbInTermsOfPredecessor = symbolicUbInTermsOfPredecessor; - _symbolicLowerBiasInTermsOfPredecessor = symbolicLowerBiasInTermsOfPredecessor; - _symbolicUpperBiasInTermsOfPredecessor = symbolicUpperBiasInTermsOfPredecessor; + _outputSymbolicLb = outputSymbolicLb; + _outputSymbolicUb = outputSymbolicUb; + _outputSymbolicLowerBias = outputSymbolicLowerBias; + _outputSymbolicUpperBias = outputSymbolicUpperBias; + _predecessorSymbolicLb = predecessorSymbolicLb; + _predecessorSymbolicUb = predecessorSymbolicUb; + _predecessorSymbolicLowerBias = predecessorSymbolicLowerBias; + _predecessorSymbolicUpperBias = predecessorSymbolicUpperBias; } void DeepPolyElement::storeOutputSymbolicBounds( @@ -284,16 +283,14 @@ void DeepPolyElement::storeOutputSymbolicBounds( for ( unsigned i = 0; i < _size * _outputLayerSize; ++i ) { - ( *_outputLayerSymbolicLb )[_layerIndex][i] = work1SymbolicLb[i]; - ( *_outputLayerSymbolicUb )[_layerIndex][i] = work1SymbolicUb[i]; + ( *_outputSymbolicLb )[_layerIndex][i] = work1SymbolicLb[i]; + ( *_outputSymbolicUb )[_layerIndex][i] = work1SymbolicUb[i]; } for ( unsigned i = 0; i < _outputLayerSize; ++i ) { - ( *_outputLayerSymbolicLowerBias )[_layerIndex][i] = - symbolicLowerBiasConcretizedResiduals[i]; - ( *_outputLayerSymbolicUpperBias )[_layerIndex][i] = - 
symbolicUpperBiasConcretizedResiduals[i]; + ( *_outputSymbolicLowerBias )[_layerIndex][i] = symbolicLowerBiasConcretizedResiduals[i]; + ( *_outputSymbolicUpperBias )[_layerIndex][i] = symbolicUpperBiasConcretizedResiduals[i]; } } diff --git a/src/nlr/DeepPolyElement.h b/src/nlr/DeepPolyElement.h index d6da8f0d57..1c0b05953e 100644 --- a/src/nlr/DeepPolyElement.h +++ b/src/nlr/DeepPolyElement.h @@ -69,8 +69,8 @@ class DeepPolyElement double getLowerBound( unsigned index ) const; double getUpperBound( unsigned index ) const; - void setStoreOutputLayerSymbolicBounds( bool storeOutputLayerSymbolicBounds ); - void setStoreSymbolicBoundsInTermsOfPredecessor( bool storeSymbolicBoundsInTermsOfPredecessor ); + void setStoreOutputSymbolicBounds( bool storeOutputSymbolicBounds ); + void setStorePredecessorSymbolicBounds( bool storePredecessorSymbolicBounds ); void setUseParameterisedSBT( bool useParameterisedSBT ); void setLayerIndicesToParameters( Map> *layerIndicesToParameters ); void setOutputLayerSize( unsigned outputLayerSize ); @@ -82,15 +82,14 @@ class DeepPolyElement double *workSymbolicLowerBias, double *workSymbolicUpperBias ); - void - setSymbolicBoundsMemory( Map> *outputLayerSymbolicLb, - Map> *outputLayerSymbolicUb, - Map> *outputLayerSymbolicLowerBias, - Map> *outputLayerSymbolicUpperBias, - Map> *symbolicLbInTermsOfPredecessor, - Map> *symbolicUbInTermsOfPredecessor, - Map> *symbolicLowerBiasInTermsOfPredecessor, - Map> *symbolicUpperBiasInTermsOfPredecessor ); + void setSymbolicBoundsMemory( Map> *outputSymbolicLb, + Map> *outputSymbolicUb, + Map> *outputSymbolicLowerBias, + Map> *outputSymbolicUpperBias, + Map> *predecessorSymbolicLb, + Map> *predecessorSymbolicUb, + Map> *predecessorSymbolicLowerBias, + Map> *predecessorSymbolicUpperBias ); void storeOutputSymbolicBounds( double *work1SymbolicLb, @@ -109,8 +108,8 @@ class DeepPolyElement Layer *_layer; unsigned _size; unsigned _layerIndex; - bool _storeOutputLayerSymbolicBounds; - bool _storeSymbolicBoundsInTermsOfPredecessor; + bool _storeOutputSymbolicBounds; + bool _storePredecessorSymbolicBounds; bool _useParameterisedSBT; Map> *_layerIndicesToParameters; unsigned _outputLayerSize; @@ -134,15 +133,15 @@ class DeepPolyElement double *_workSymbolicLowerBias; double *_workSymbolicUpperBias; - Map> *_outputLayerSymbolicLb; - Map> *_outputLayerSymbolicUb; - Map> *_outputLayerSymbolicLowerBias; - Map> *_outputLayerSymbolicUpperBias; + Map> *_outputSymbolicLb; + Map> *_outputSymbolicUb; + Map> *_outputSymbolicLowerBias; + Map> *_outputSymbolicUpperBias; - Map> *_symbolicLbInTermsOfPredecessor; - Map> *_symbolicUbInTermsOfPredecessor; - Map> *_symbolicLowerBiasInTermsOfPredecessor; - Map> *_symbolicUpperBiasInTermsOfPredecessor; + Map> *_predecessorSymbolicLb; + Map> *_predecessorSymbolicUb; + Map> *_predecessorSymbolicLowerBias; + Map> *_predecessorSymbolicUpperBias; void allocateMemory(); void freeMemoryIfNeeded(); diff --git a/src/nlr/DeepPolyLeakyReLUElement.cpp b/src/nlr/DeepPolyLeakyReLUElement.cpp index 3bc3358abf..358d02770d 100644 --- a/src/nlr/DeepPolyLeakyReLUElement.cpp +++ b/src/nlr/DeepPolyLeakyReLUElement.cpp @@ -222,7 +222,7 @@ void DeepPolyLeakyReLUElement::execute( } - if ( _storeSymbolicBoundsInTermsOfPredecessor ) + if ( _storePredecessorSymbolicBounds ) { storePredecessorSymbolicBounds(); } @@ -235,12 +235,10 @@ void DeepPolyLeakyReLUElement::storePredecessorSymbolicBounds() for ( unsigned i = 0; i < _size; ++i ) { NeuronIndex sourceIndex = *( _layer->getActivationSources( i ).begin() ); - ( 
*_symbolicLbInTermsOfPredecessor )[_layerIndex][_size * sourceIndex._neuron + i] = - _symbolicLb[i]; - ( *_symbolicUbInTermsOfPredecessor )[_layerIndex][_size * sourceIndex._neuron + i] = - _symbolicUb[i]; - ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex][i] = _symbolicLowerBias[i]; - ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex][i] = _symbolicUpperBias[i]; + ( *_predecessorSymbolicLb )[_layerIndex][_size * sourceIndex._neuron + i] = _symbolicLb[i]; + ( *_predecessorSymbolicUb )[_layerIndex][_size * sourceIndex._neuron + i] = _symbolicUb[i]; + ( *_predecessorSymbolicLowerBias )[_layerIndex][i] = _symbolicLowerBias[i]; + ( *_predecessorSymbolicUpperBias )[_layerIndex][i] = _symbolicUpperBias[i]; } } diff --git a/src/nlr/DeepPolyMaxPoolElement.cpp b/src/nlr/DeepPolyMaxPoolElement.cpp index cdc5b195b5..4957e60ab3 100644 --- a/src/nlr/DeepPolyMaxPoolElement.cpp +++ b/src/nlr/DeepPolyMaxPoolElement.cpp @@ -117,7 +117,7 @@ void DeepPolyMaxPoolElement::execute( log( Stringf( "Handling Neuron %u_%u - done", _layerIndex, i ) ); } - if ( _storeSymbolicBoundsInTermsOfPredecessor ) + if ( _storePredecessorSymbolicBounds ) { storePredecessorSymbolicBounds( maxLowerBoundIndices, maxUpperBounds ); } @@ -133,21 +133,17 @@ void DeepPolyMaxPoolElement::storePredecessorSymbolicBounds( { if ( _phaseFixed[i] > 0 ) { - ( *_symbolicLbInTermsOfPredecessor )[_layerIndex][_size * indexOfMaxLowerBound[i] + i] = - 1; - ( *_symbolicUbInTermsOfPredecessor )[_layerIndex][_size * indexOfMaxLowerBound[i] + i] = - 1; - ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex][i] = 0; - ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex][i] = 0; + ( *_predecessorSymbolicLb )[_layerIndex][_size * indexOfMaxLowerBound[i] + i] = 1; + ( *_predecessorSymbolicUb )[_layerIndex][_size * indexOfMaxLowerBound[i] + i] = 1; + ( *_predecessorSymbolicLowerBias )[_layerIndex][i] = 0; + ( *_predecessorSymbolicUpperBias )[_layerIndex][i] = 0; } else { - ( *_symbolicLbInTermsOfPredecessor )[_layerIndex][_size * indexOfMaxLowerBound[i] + i] = - 1; - ( *_symbolicUbInTermsOfPredecessor )[_layerIndex][_size * indexOfMaxLowerBound[i] + i] = - 0; - ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex][i] = 0; - ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex][i] = maxUpperBound[i]; + ( *_predecessorSymbolicLb )[_layerIndex][_size * indexOfMaxLowerBound[i] + i] = 1; + ( *_predecessorSymbolicUb )[_layerIndex][_size * indexOfMaxLowerBound[i] + i] = 0; + ( *_predecessorSymbolicLowerBias )[_layerIndex][i] = 0; + ( *_predecessorSymbolicUpperBias )[_layerIndex][i] = maxUpperBound[i]; } } } diff --git a/src/nlr/DeepPolyReLUElement.cpp b/src/nlr/DeepPolyReLUElement.cpp index 290c2250f4..51accad00f 100644 --- a/src/nlr/DeepPolyReLUElement.cpp +++ b/src/nlr/DeepPolyReLUElement.cpp @@ -168,7 +168,7 @@ void DeepPolyReLUElement::execute( const Map &deepP log( Stringf( "Neuron%u LB: %f, UB: %f", i, _lb[i], _ub[i] ) ); } - if ( _storeSymbolicBoundsInTermsOfPredecessor ) + if ( _storePredecessorSymbolicBounds ) { storePredecessorSymbolicBounds(); } @@ -181,12 +181,10 @@ void DeepPolyReLUElement::storePredecessorSymbolicBounds() for ( unsigned i = 0; i < _size; ++i ) { NeuronIndex sourceIndex = *( _layer->getActivationSources( i ).begin() ); - ( *_symbolicLbInTermsOfPredecessor )[_layerIndex][_size * sourceIndex._neuron + i] = - _symbolicLb[i]; - ( *_symbolicUbInTermsOfPredecessor )[_layerIndex][_size * sourceIndex._neuron + i] = - _symbolicUb[i]; - ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex][i] = 
_symbolicLowerBias[i]; - ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex][i] = _symbolicUpperBias[i]; + ( *_predecessorSymbolicLb )[_layerIndex][_size * sourceIndex._neuron + i] = _symbolicLb[i]; + ( *_predecessorSymbolicUb )[_layerIndex][_size * sourceIndex._neuron + i] = _symbolicUb[i]; + ( *_predecessorSymbolicLowerBias )[_layerIndex][i] = _symbolicLowerBias[i]; + ( *_predecessorSymbolicUpperBias )[_layerIndex][i] = _symbolicUpperBias[i]; } } diff --git a/src/nlr/DeepPolyRoundElement.cpp b/src/nlr/DeepPolyRoundElement.cpp index 754783bd59..15561fbeff 100644 --- a/src/nlr/DeepPolyRoundElement.cpp +++ b/src/nlr/DeepPolyRoundElement.cpp @@ -83,7 +83,7 @@ void DeepPolyRoundElement::execute( const Map &deep log( Stringf( "Neuron%u LB: %f, UB: %f", i, _lb[i], _ub[i] ) ); } - if ( _storeSymbolicBoundsInTermsOfPredecessor ) + if ( _storePredecessorSymbolicBounds ) { storePredecessorSymbolicBounds(); } @@ -96,12 +96,10 @@ void DeepPolyRoundElement::storePredecessorSymbolicBounds() for ( unsigned i = 0; i < _size; ++i ) { NeuronIndex sourceIndex = *( _layer->getActivationSources( i ).begin() ); - ( *_symbolicLbInTermsOfPredecessor )[_layerIndex][_size * sourceIndex._neuron + i] = - _symbolicLb[i]; - ( *_symbolicUbInTermsOfPredecessor )[_layerIndex][_size * sourceIndex._neuron + i] = - _symbolicUb[i]; - ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex][i] = _symbolicLowerBias[i]; - ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex][i] = _symbolicUpperBias[i]; + ( *_predecessorSymbolicLb )[_layerIndex][_size * sourceIndex._neuron + i] = _symbolicLb[i]; + ( *_predecessorSymbolicUb )[_layerIndex][_size * sourceIndex._neuron + i] = _symbolicUb[i]; + ( *_predecessorSymbolicLowerBias )[_layerIndex][i] = _symbolicLowerBias[i]; + ( *_predecessorSymbolicUpperBias )[_layerIndex][i] = _symbolicUpperBias[i]; } } diff --git a/src/nlr/DeepPolySigmoidElement.cpp b/src/nlr/DeepPolySigmoidElement.cpp index 6cc924a98f..0f8fb71afc 100644 --- a/src/nlr/DeepPolySigmoidElement.cpp +++ b/src/nlr/DeepPolySigmoidElement.cpp @@ -100,7 +100,7 @@ void DeepPolySigmoidElement::execute( log( Stringf( "Neuron%u LB: %f, UB: %f", i, _lb[i], _ub[i] ) ); } - if ( _storeSymbolicBoundsInTermsOfPredecessor ) + if ( _storePredecessorSymbolicBounds ) { storePredecessorSymbolicBounds(); } @@ -113,12 +113,10 @@ void DeepPolySigmoidElement::storePredecessorSymbolicBounds() for ( unsigned i = 0; i < _size; ++i ) { NeuronIndex sourceIndex = *( _layer->getActivationSources( i ).begin() ); - ( *_symbolicLbInTermsOfPredecessor )[_layerIndex][_size * sourceIndex._neuron + i] = - _symbolicLb[i]; - ( *_symbolicUbInTermsOfPredecessor )[_layerIndex][_size * sourceIndex._neuron + i] = - _symbolicUb[i]; - ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex][i] = _symbolicLowerBias[i]; - ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex][i] = _symbolicUpperBias[i]; + ( *_predecessorSymbolicLb )[_layerIndex][_size * sourceIndex._neuron + i] = _symbolicLb[i]; + ( *_predecessorSymbolicUb )[_layerIndex][_size * sourceIndex._neuron + i] = _symbolicUb[i]; + ( *_predecessorSymbolicLowerBias )[_layerIndex][i] = _symbolicLowerBias[i]; + ( *_predecessorSymbolicUpperBias )[_layerIndex][i] = _symbolicUpperBias[i]; } } diff --git a/src/nlr/DeepPolySignElement.cpp b/src/nlr/DeepPolySignElement.cpp index 26d9ee26a1..c915261723 100644 --- a/src/nlr/DeepPolySignElement.cpp +++ b/src/nlr/DeepPolySignElement.cpp @@ -153,7 +153,7 @@ void DeepPolySignElement::execute( const Map &deepP log( Stringf( "Neuron%u LB: %f, UB: %f", i, _lb[i], _ub[i] 
) ); } - if ( _storeSymbolicBoundsInTermsOfPredecessor ) + if ( _storePredecessorSymbolicBounds ) { storePredecessorSymbolicBounds(); } @@ -166,12 +166,10 @@ void DeepPolySignElement::storePredecessorSymbolicBounds() for ( unsigned i = 0; i < _size; ++i ) { NeuronIndex sourceIndex = *( _layer->getActivationSources( i ).begin() ); - ( *_symbolicLbInTermsOfPredecessor )[_layerIndex][_size * sourceIndex._neuron + i] = - _symbolicLb[i]; - ( *_symbolicUbInTermsOfPredecessor )[_layerIndex][_size * sourceIndex._neuron + i] = - _symbolicUb[i]; - ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex][i] = _symbolicLowerBias[i]; - ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex][i] = _symbolicUpperBias[i]; + ( *_predecessorSymbolicLb )[_layerIndex][_size * sourceIndex._neuron + i] = _symbolicLb[i]; + ( *_predecessorSymbolicUb )[_layerIndex][_size * sourceIndex._neuron + i] = _symbolicUb[i]; + ( *_predecessorSymbolicLowerBias )[_layerIndex][i] = _symbolicLowerBias[i]; + ( *_predecessorSymbolicUpperBias )[_layerIndex][i] = _symbolicUpperBias[i]; } } diff --git a/src/nlr/DeepPolySoftmaxElement.cpp b/src/nlr/DeepPolySoftmaxElement.cpp index 1966bf8b4d..3490b1fa43 100644 --- a/src/nlr/DeepPolySoftmaxElement.cpp +++ b/src/nlr/DeepPolySoftmaxElement.cpp @@ -189,7 +189,7 @@ void DeepPolySoftmaxElement::execute( } } - if ( _storeSymbolicBoundsInTermsOfPredecessor ) + if ( _storePredecessorSymbolicBounds ) { storePredecessorSymbolicBounds(); } @@ -201,14 +201,14 @@ void DeepPolySoftmaxElement::storePredecessorSymbolicBounds() { for ( unsigned i = 0; i < _size * _size; ++i ) { - ( *_symbolicLbInTermsOfPredecessor )[_layerIndex][i] = _symbolicLb[i]; - ( *_symbolicUbInTermsOfPredecessor )[_layerIndex][i] = _symbolicUb[i]; + ( *_predecessorSymbolicLb )[_layerIndex][i] = _symbolicLb[i]; + ( *_predecessorSymbolicUb )[_layerIndex][i] = _symbolicUb[i]; } for ( unsigned i = 0; i < _size; ++i ) { - ( *_symbolicLowerBiasInTermsOfPredecessor )[_layerIndex][i] = _symbolicLowerBias[i]; - ( *_symbolicUpperBiasInTermsOfPredecessor )[_layerIndex][i] = _symbolicUpperBias[i]; + ( *_predecessorSymbolicLowerBias )[_layerIndex][i] = _symbolicLowerBias[i]; + ( *_predecessorSymbolicUpperBias )[_layerIndex][i] = _symbolicUpperBias[i]; } } diff --git a/src/nlr/DeepPolyWeightedSumElement.cpp b/src/nlr/DeepPolyWeightedSumElement.cpp index b1130f6fa5..62b0e2209b 100644 --- a/src/nlr/DeepPolyWeightedSumElement.cpp +++ b/src/nlr/DeepPolyWeightedSumElement.cpp @@ -103,7 +103,7 @@ void DeepPolyWeightedSumElement::computeBoundWithBackSubstitution( currentElement, deepPolyElementsBefore ); - if ( _storeOutputLayerSymbolicBounds ) + if ( _storeOutputSymbolicBounds ) { precedingElement->storeOutputSymbolicBounds( _work1SymbolicLb, _work1SymbolicUb, @@ -243,7 +243,7 @@ void DeepPolyWeightedSumElement::computeBoundWithBackSubstitution( std::fill_n( _residualUb[newCurrentIndex], currentMatrixSize, 0 ); } - if ( _storeOutputLayerSymbolicBounds ) + if ( _storeOutputSymbolicBounds ) { precedingElement->storeOutputSymbolicBounds( _work1SymbolicLb, _work1SymbolicUb, diff --git a/src/nlr/NetworkLevelReasoner.cpp b/src/nlr/NetworkLevelReasoner.cpp index 9b208d5e2c..42c2af69b1 100644 --- a/src/nlr/NetworkLevelReasoner.cpp +++ b/src/nlr/NetworkLevelReasoner.cpp @@ -737,175 +737,173 @@ double NetworkLevelReasoner::getPreviousBias( const ReluConstraint *reluConstrai return _previousBiases[reluConstraint]; } -Vector NetworkLevelReasoner::getOutputLayerSymbolicLb( unsigned layerIndex ) const +Vector NetworkLevelReasoner::getOutputSymbolicLb( unsigned 
layerIndex ) const { // Initialize map if empty. - if ( _outputLayerSymbolicLb.empty() ) + if ( _outputSymbolicLb.empty() ) { const_cast( this )->initializeSymbolicBoundsMaps(); } - if ( !_outputLayerSymbolicLb.exists( layerIndex ) ) + if ( !_outputSymbolicLb.exists( layerIndex ) ) { throw NLRError( NLRError::LAYER_NOT_FOUND, "Layer not found in output layer symbolic bounds map." ); } - return _outputLayerSymbolicLb[layerIndex]; + return _outputSymbolicLb[layerIndex]; } -Vector NetworkLevelReasoner::getOutputLayerSymbolicUb( unsigned layerIndex ) const +Vector NetworkLevelReasoner::getOutputSymbolicUb( unsigned layerIndex ) const { // Initialize map if empty. - if ( _outputLayerSymbolicUb.empty() ) + if ( _outputSymbolicUb.empty() ) { const_cast( this )->initializeSymbolicBoundsMaps(); } - if ( !_outputLayerSymbolicUb.exists( layerIndex ) ) + if ( !_outputSymbolicUb.exists( layerIndex ) ) { throw NLRError( NLRError::LAYER_NOT_FOUND, "Layer not found in output layer symbolic bounds map." ); } - return _outputLayerSymbolicUb[layerIndex]; + return _outputSymbolicUb[layerIndex]; } -Vector NetworkLevelReasoner::getOutputLayerSymbolicLowerBias( unsigned layerIndex ) const +Vector NetworkLevelReasoner::getOutputSymbolicLowerBias( unsigned layerIndex ) const { // Initialize map if empty. - if ( _outputLayerSymbolicLowerBias.empty() ) + if ( _outputSymbolicLowerBias.empty() ) { const_cast( this )->initializeSymbolicBoundsMaps(); } - if ( !_outputLayerSymbolicLowerBias.exists( layerIndex ) ) + if ( !_outputSymbolicLowerBias.exists( layerIndex ) ) { throw NLRError( NLRError::LAYER_NOT_FOUND, "Layer not found in output layer symbolic bounds map." ); } - return _outputLayerSymbolicLowerBias[layerIndex]; + return _outputSymbolicLowerBias[layerIndex]; } -Vector NetworkLevelReasoner::getOutputLayerSymbolicUpperBias( unsigned layerIndex ) const +Vector NetworkLevelReasoner::getOutputSymbolicUpperBias( unsigned layerIndex ) const { // Initialize map if empty. - if ( _outputLayerSymbolicUpperBias.empty() ) + if ( _outputSymbolicUpperBias.empty() ) { const_cast( this )->initializeSymbolicBoundsMaps(); } - if ( !_outputLayerSymbolicUpperBias.exists( layerIndex ) ) + if ( !_outputSymbolicUpperBias.exists( layerIndex ) ) { throw NLRError( NLRError::LAYER_NOT_FOUND, "Layer not found in output layer symbolic bounds map." ); } - return _outputLayerSymbolicUpperBias[layerIndex]; + return _outputSymbolicUpperBias[layerIndex]; } -Vector NetworkLevelReasoner::getSymbolicLbInTermsOfPredecessor( unsigned layerIndex ) const +Vector NetworkLevelReasoner::getPredecessorSymbolicLb( unsigned layerIndex ) const { // Initialize map if empty. - if ( _symbolicLbInTermsOfPredecessor.empty() ) + if ( _predecessorSymbolicLb.empty() ) { const_cast( this )->initializeSymbolicBoundsMaps(); } - if ( !_symbolicLbInTermsOfPredecessor.exists( layerIndex ) ) + if ( !_predecessorSymbolicLb.exists( layerIndex ) ) { throw NLRError( NLRError::LAYER_NOT_FOUND, "Layer not found in predecessor layer symbolic bounds map." ); } - return _symbolicLbInTermsOfPredecessor[layerIndex]; + return _predecessorSymbolicLb[layerIndex]; } -Vector NetworkLevelReasoner::getSymbolicUbInTermsOfPredecessor( unsigned layerIndex ) const +Vector NetworkLevelReasoner::getPredecessorSymbolicUb( unsigned layerIndex ) const { // Initialize map if empty. 
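Every accessor in this family repeats the pattern introduced by the comment above: if the cached map is empty, cast away constness and build it on demand, then throw if the requested layer is missing. A minimal self-contained sketch of the idiom, with illustrative names only (std::map and std::runtime_error stand in for Marabou's Map and NLRError):

    #include <map>
    #include <stdexcept>

    class SymbolicBoundCache
    {
    public:
        // Const accessor that lazily builds the cache on first use.
        double get( unsigned layerIndex ) const
        {
            if ( _bounds.empty() )
                const_cast<SymbolicBoundCache *>( this )->initialize();
            auto it = _bounds.find( layerIndex );
            if ( it == _bounds.end() )
                throw std::runtime_error( "Layer not found in symbolic bounds map." );
            return it->second;
        }

    private:
        std::map<unsigned, double> _bounds;

        // Stand-in for initializeSymbolicBoundsMaps(): populate the cache.
        void initialize()
        {
            _bounds[0] = 0.0;
        }
    };

Declaring the map mutable would avoid the const_cast at the cost of hiding the mutation from readers of the header; the const_cast keeps the lazy rebuild explicit at each call site.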
- if ( _symbolicUbInTermsOfPredecessor.empty() ) + if ( _predecessorSymbolicUb.empty() ) { const_cast( this )->initializeSymbolicBoundsMaps(); } - if ( !_symbolicUbInTermsOfPredecessor.exists( layerIndex ) ) + if ( !_predecessorSymbolicUb.exists( layerIndex ) ) { throw NLRError( NLRError::LAYER_NOT_FOUND, "Layer not found in predecessor layer symbolic bounds map." ); } - return _symbolicUbInTermsOfPredecessor[layerIndex]; + return _predecessorSymbolicUb[layerIndex]; } -Vector -NetworkLevelReasoner::getSymbolicLowerBiasInTermsOfPredecessor( unsigned layerIndex ) const +Vector NetworkLevelReasoner::getPredecessorSymbolicLowerBias( unsigned layerIndex ) const { // Initialize map if empty. - if ( _symbolicLowerBiasInTermsOfPredecessor.empty() ) + if ( _predecessorSymbolicLowerBias.empty() ) { const_cast( this )->initializeSymbolicBoundsMaps(); } - if ( !_symbolicLowerBiasInTermsOfPredecessor.exists( layerIndex ) ) + if ( !_predecessorSymbolicLowerBias.exists( layerIndex ) ) { throw NLRError( NLRError::LAYER_NOT_FOUND, "Layer not found in predecessor layer symbolic bounds map." ); } - return _symbolicLowerBiasInTermsOfPredecessor[layerIndex]; + return _predecessorSymbolicLowerBias[layerIndex]; } -Vector -NetworkLevelReasoner::getSymbolicUpperBiasInTermsOfPredecessor( unsigned layerIndex ) const +Vector NetworkLevelReasoner::getPredecessorSymbolicUpperBias( unsigned layerIndex ) const { // Initialize map if empty. - if ( _symbolicUpperBiasInTermsOfPredecessor.empty() ) + if ( _predecessorSymbolicUpperBias.empty() ) { const_cast( this )->initializeSymbolicBoundsMaps(); } - if ( !_symbolicUpperBiasInTermsOfPredecessor.exists( layerIndex ) ) + if ( !_predecessorSymbolicUpperBias.exists( layerIndex ) ) { throw NLRError( NLRError::LAYER_NOT_FOUND, "Layer not found in predecessor layer symbolic bounds map." ); } - return _symbolicUpperBiasInTermsOfPredecessor[layerIndex]; + return _predecessorSymbolicUpperBias[layerIndex]; } -Map NetworkLevelReasoner::getBBPSBranchingPoint( NeuronIndex index ) const +double NetworkLevelReasoner::getPMNRScore( NeuronIndex index ) const { // Initialize map if empty. - if ( _neuronToBBPSBranchingPoints.empty() ) + if ( _neuronToPMNRScores.empty() ) { - const_cast( this )->initializeBBPSMaps(); + const_cast( this )->initializePMNRScoreMap(); } - if ( !_neuronToBBPSBranchingPoints.exists( index ) ) + if ( !_neuronToPMNRScores.exists( index ) ) { - throw NLRError( NLRError::NEURON_NOT_FOUND, - "Neuron not found in BBPS branching points map." ); + throw NLRError( NLRError::NEURON_NOT_FOUND, "Neuron not found in PMNR scores map." ); } - return _neuronToBBPSBranchingPoints[index]; + return _neuronToPMNRScores[index]; } -double NetworkLevelReasoner::getBBPSScore( NeuronIndex index ) const +Map NetworkLevelReasoner::getBBPSBranchingPoint( NeuronIndex index ) const { // Initialize map if empty. - if ( _neuronToBBPSScores.empty() ) + if ( _neuronToBBPSBranchingPoints.empty() ) { - const_cast( this )->initializeBBPSMaps(); + const_cast( this )->initializeBBPSBranchingMap(); } - if ( !_neuronToBBPSScores.exists( index ) ) + if ( !_neuronToBBPSBranchingPoints.exists( index ) ) { - throw NLRError( NLRError::NEURON_NOT_FOUND, "Neuron not found in BBPS scores map." ); + throw NLRError( NLRError::NEURON_NOT_FOUND, + "Neuron not found in BBPS branching points map." 
); } - return _neuronToBBPSScores[index]; + return _neuronToBBPSBranchingPoints[index]; } unsigned @@ -1003,20 +1001,16 @@ const Vector NetworkLevelReasoner::OptimalParameterisedSymbolicBoundTigh double lr = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE; unsigned dimension = getNumberOfParameters(); bool maximize = false; + double sign = ( maximize ? 1 : -1 ); - Vector lowerBounds( dimension ); - Vector upperBounds( dimension ); - for ( size_t j = 0; j < dimension; ++j ) - { - lowerBounds[j] = 0; - upperBounds[j] = 1; - } + Vector lowerBounds( dimension, 0 ); + Vector upperBounds( dimension, 1 ); // Initialize initial guess uniformly. Vector guess( dimension ); std::mt19937_64 rng( GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED ); std::uniform_real_distribution dis( 0, 1 ); - for ( size_t j = 0; j < dimension; ++j ) + for ( unsigned j = 0; j < dimension; ++j ) { double lb = lowerBounds[j]; double ub = upperBounds[j]; @@ -1026,10 +1020,10 @@ const Vector NetworkLevelReasoner::OptimalParameterisedSymbolicBoundTigh Vector> candidates( dimension ); Vector gradient( dimension ); - for ( size_t i = 0; i < maxIterations; ++i ) + for ( unsigned i = 0; i < maxIterations; ++i ) { double currentCost = EstimateVolume( guess ); - for ( size_t j = 0; j < dimension; ++j ) + for ( unsigned j = 0; j < dimension; ++j ) { candidates[j] = Vector( guess ); candidates[j][j] += stepSize; @@ -1040,13 +1034,12 @@ const Vector NetworkLevelReasoner::OptimalParameterisedSymbolicBoundTigh continue; } - size_t sign = ( maximize == false ? 1 : -1 ); double cost = EstimateVolume( candidates[j] ); - gradient[j] = sign * ( cost - currentCost ) / stepSize + weightDecay * guess[j]; + gradient[j] = ( cost - currentCost ) / stepSize + weightDecay * guess[j]; } bool gradientIsZero = true; - for ( size_t j = 0; j < dimension; ++j ) + for ( unsigned j = 0; j < dimension; ++j ) { if ( FloatUtils::abs( gradient[j] ) > epsilon ) { @@ -1058,19 +1051,12 @@ const Vector NetworkLevelReasoner::OptimalParameterisedSymbolicBoundTigh break; } - for ( size_t j = 0; j < dimension; ++j ) + for ( unsigned j = 0; j < dimension; ++j ) { - guess[j] -= lr * gradient[j]; - - if ( guess[j] > upperBounds[j] ) - { - guess[j] = upperBounds[j]; - } + guess[j] += sign * lr * gradient[j]; - if ( guess[j] < lowerBounds[j] ) - { - guess[j] = lowerBounds[j]; - } + guess[j] = std::min( guess[j], upperBounds[j] ); + guess[j] = std::max( guess[j], lowerBounds[j] ); } } @@ -1191,7 +1177,7 @@ double NetworkLevelReasoner::OptimizeSingleParameterisedPolygonalTightening( { // Search over coeffs in [0, 1]^numberOfParameters, gamma in // [0, inf)^sizeOfPrevTightenings with PGD. - // unsigned maxIterations = GlobalConfiguration::INVPROP_MAX_ITERATIONS; + // unsigned maxIterations = GlobalConfiguration::INVPROP_MAX_ITERATIONS; // ... unsigned maxIterations = 1000; // double coeffsStepSize = GlobalConfiguration::INVPROP_STEP_SIZE; // double gammaStepSize = GlobalConfiguration::INVPROP_STEP_SIZE; @@ -1203,7 +1189,7 @@ double NetworkLevelReasoner::OptimizeSingleParameterisedPolygonalTightening( unsigned coeffsDimension = getNumberOfParameters(); unsigned gammaDimension = prevTightenings.size(); bool maximize = ( tightening._type == PolygonalTightening::LB ); - int sign = ( maximize ? 1 : -1 ); + double sign = ( maximize ? 
1 : -1 );
     double bestBound = sign * tightening._value;
 
     Vector<double> coeffsLowerBounds( coeffsDimension, 0 );
@@ -1214,13 +1200,13 @@ double NetworkLevelReasoner::OptimizeSingleParameterisedPolygonalTightening(
     Vector<double> gamma( gammaDimension, GlobalConfiguration::INVPROP_INITIAL_GAMMA );
 
     Vector<Vector<double>> coeffsCandidates( coeffsDimension );
-    Vector<double> coeffsGradient( coeffsDimension );
     Vector<Vector<double>> gammaCandidates( gammaDimension );
+    Vector<double> coeffsGradient( coeffsDimension );
     Vector<double> gammaGradient( gammaDimension );
 
     for ( unsigned i = 0; i < maxIterations; ++i )
     {
-        double cost = getParameterisdPolygonalTighteningLowerBound(
+        double currentCost = getParameterisdPolygonalTighteningLowerBound(
             coeffs, gamma, tightening, prevTightenings );
         for ( unsigned j = 0; j < coeffsDimension; ++j )
         {
@@ -1235,11 +1221,10 @@ double NetworkLevelReasoner::OptimizeSingleParameterisedPolygonalTightening(
                 continue;
             }
 
-            double currentCost = getParameterisdPolygonalTighteningLowerBound(
+            double cost = getParameterisdPolygonalTighteningLowerBound(
                 coeffsCandidates[j], gamma, tightening, prevTightenings );
-            coeffsGradient[j] = ( currentCost - cost ) / coeffsStepSize + weightDecay * coeffs[j];
-            bestBound = ( maximize ? std::max( bestBound, currentCost )
-                                   : std::min( bestBound, currentCost ) );
+            coeffsGradient[j] = ( cost - currentCost ) / coeffsStepSize + weightDecay * coeffs[j];
+            bestBound = ( maximize ? std::max( bestBound, cost ) : std::min( bestBound, cost ) );
         }
 
         for ( unsigned j = 0; j < gammaDimension; ++j )
@@ -1253,11 +1238,10 @@ double NetworkLevelReasoner::OptimizeSingleParameterisedPolygonalTightening(
                 continue;
             }
 
-            double currentCost = getParameterisdPolygonalTighteningLowerBound(
+            double cost = getParameterisdPolygonalTighteningLowerBound(
                 coeffs, gammaCandidates[j], tightening, prevTightenings );
-            gammaGradient[j] = ( currentCost - cost ) / gammaStepSize + weightDecay * gamma[j];
-            bestBound = ( maximize ? std::max( bestBound, currentCost )
-                                   : std::min( bestBound, currentCost ) );
+            gammaGradient[j] = ( cost - currentCost ) / gammaStepSize + weightDecay * gamma[j];
+            bestBound = ( maximize ? std::max( bestBound, cost ) : std::min( bestBound, cost ) );
         }
 
         bool gradientIsZero = true;
@@ -1302,13 +1286,8 @@ double NetworkLevelReasoner::getParameterisdPolygonalTighteningLowerBound(
     PolygonalTightening &tightening,
     Vector<PolygonalTightening> &prevTightenings )
 {
-    // First, run parameterised symbolic bound propagation and compue successor layers.
-    Map<unsigned, Vector<double>> layerIndicesToParameters = getParametersForLayers( coeffs );
-    for ( unsigned i = 0; i < _layerIndexToLayer.size(); ++i )
-    {
-        const Vector<double> &currentLayerCoeffs = layerIndicesToParameters[i];
-        _layerIndexToLayer[i]->computeParameterisedSymbolicBounds( currentLayerCoeffs );
-    }
+    // First, run parameterised symbolic bound propagation via initializeSymbolicBoundsMaps.
+    initializeSymbolicBoundsMaps( coeffs );
 
     // Recursively compute mu, muHat for every layer.
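The recursion that follows accumulates each layer's dual coefficients ( mu ) into its predecessor ( muHat ) through the stored predecessor symbolic bounds, splitting on the sign of the coefficient. A simplified sketch of that accumulation step, assuming flat std::vector storage and the same [predecessorIndex * successorSize + j] layout as the code below (illustrative only, not the real per-layer Vector plumbing):

    #include <vector>

    // mu: dual coefficients of the successor layer's m neurons.
    // lb, ub: symbolic lower/upper bound coefficients of successor neuron j in
    //         terms of predecessor neuron i, flattened as lb[i * m + j].
    // muHat: accumulates the contribution pushed back onto the predecessor.
    void backSubstituteStep( const std::vector<double> &mu,
                             const std::vector<double> &lb,
                             const std::vector<double> &ub,
                             std::vector<double> &muHat )
    {
        const size_t m = mu.size();
        for ( size_t i = 0; i < muHat.size(); ++i )
        {
            for ( size_t j = 0; j < m; ++j )
            {
                // Positive duals flow through the upper relaxation, negative
                // ones through the lower relaxation, mirroring the branch
                // structure in the code below.
                if ( mu[j] > 0 )
                    muHat[i] += mu[j] * ub[i * m + j];
                else
                    muHat[i] -= mu[j] * lb[i * m + j];
            }
        }
    }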
    const unsigned numLayers = _layerIndexToLayer.size();
@@ -1385,14 +1364,14 @@ double NetworkLevelReasoner::getParameterisdPolygonalTighteningLowerBound(
                     {
                         muHat[layerIndex][i] +=
                             mu[successorLayerIndex][j] *
-                            getSymbolicUbInTermsOfPredecessor( successorLayerIndex )
+                            getPredecessorSymbolicUb( successorLayerIndex )
                                 [successorLayerSize * predecessorIndex + j];
                     }
                     else
                     {
                         muHat[layerIndex][i] -=
                             mu[successorLayerIndex][j] *
-                            getSymbolicLbInTermsOfPredecessor( successorLayerIndex )
+                            getPredecessorSymbolicLb( successorLayerIndex )
                                 [successorLayerSize * predecessorIndex + j];
                     }
                 }
@@ -1492,13 +1471,13 @@ double NetworkLevelReasoner::getParameterisdPolygonalTighteningLowerBound(
             {
                 if ( mu[layerIndex][i] > 0 )
                 {
-                    lowerBound -= mu[layerIndex][i] *
-                                  getSymbolicUpperBiasInTermsOfPredecessor( layerIndex )[i];
+                    lowerBound -=
+                        mu[layerIndex][i] * getPredecessorSymbolicUpperBias( layerIndex )[i];
                 }
                 else
                 {
-                    lowerBound += mu[layerIndex][i] *
-                                  getSymbolicLowerBiasInTermsOfPredecessor( layerIndex )[i];
+                    lowerBound +=
+                        mu[layerIndex][i] * getPredecessorSymbolicLowerBias( layerIndex )[i];
                }
            }
            else
@@ -1552,61 +1531,61 @@ const Vector<PolygonalTightening> NetworkLevelReasoner::generatePolygonalTighten
 const Vector<PolygonalTightening> NetworkLevelReasoner::generatePolygonalTighteningsForPMNR()
 {
     Vector<PolygonalTightening> tightenings = Vector<PolygonalTightening>( {} );
-    unsigned neuronCount = GlobalConfiguration::PMNR_SELECTED_NEURONS;
-    Vector<PolygonalTightening> lowerBoundTightenings = Vector<PolygonalTightening>( {} );
-    Vector<PolygonalTightening> upperBoundTightenings = Vector<PolygonalTightening>( {} );
+    Vector<PolygonalTightening> lowerTightenings = Vector<PolygonalTightening>( {} );
+    Vector<PolygonalTightening> upperTightenings = Vector<PolygonalTightening>( {} );
 
     const Vector<NeuronIndex> constraints = selectConstraints();
+    unsigned neuronCount = constraints.size();
     for ( const auto &pair : constraints )
     {
         unsigned layerIndex = pair._layer;
         unsigned neuron = pair._neuron;
         Layer *layer = _layerIndexToLayer[layerIndex];
-        unsigned layerSize = layer->getSize();
+        unsigned size = layer->getSize();
 
-        Map<NeuronIndex, double> neuronToLowerCoefficient = Map<NeuronIndex, double>( {} );
-        Map<NeuronIndex, double> neuronToUpperCoefficient = Map<NeuronIndex, double>( {} );
-        neuronToLowerCoefficient[pair] = -1;
-        neuronToUpperCoefficient[pair] = -1;
+        Map<NeuronIndex, double> neuronToLowerCoeff;
+        Map<NeuronIndex, double> neuronToUpperCoeff;
+        neuronToLowerCoeff[pair] = -1;
+        neuronToUpperCoeff[pair] = -1;
 
         List<NeuronIndex> sources = layer->getActivationSources( neuron );
         unsigned predecessorIndex = 0;
         for ( const auto &sourceIndex : sources )
        {
-            neuronToLowerCoefficient[sourceIndex] = getSymbolicLbInTermsOfPredecessor(
-                layerIndex )[predecessorIndex * layerSize + neuron];
-            neuronToUpperCoefficient[sourceIndex] = getSymbolicUbInTermsOfPredecessor(
-                layerIndex )[predecessorIndex * layerSize + neuron];
+            neuronToLowerCoeff[sourceIndex] =
+                getPredecessorSymbolicLb( layerIndex )[predecessorIndex * size + neuron];
+            neuronToUpperCoeff[sourceIndex] =
+                getPredecessorSymbolicUb( layerIndex )[predecessorIndex * size + neuron];
+            ++predecessorIndex;
        }
-        PolygonalTightening lowerTightening( neuronToLowerCoefficient, 0, PolygonalTightening::UB );
-        PolygonalTightening upperTightening( neuronToLowerCoefficient, 0, PolygonalTightening::LB );
-        lowerBoundTightenings.append( lowerTightening );
-        upperBoundTightenings.append( upperTightening );
+        PolygonalTightening lowerTightening( neuronToLowerCoeff, 0, PolygonalTightening::UB );
+        PolygonalTightening upperTightening( neuronToUpperCoeff, 0, PolygonalTightening::LB );
+        lowerTightenings.append( lowerTightening );
+        upperTightenings.append( upperTightening );
     }
 
     int range = ( 1 << neuronCount ) - 1;
     for ( int i = 0; i < range; ++i )
     {
-        Map<NeuronIndex, double> neuronToLowerCoefficient = Map<NeuronIndex, double>( {} );
-        Map<NeuronIndex, double> neuronToUpperCoefficient = Map<NeuronIndex, double>( {} );
+        Map<NeuronIndex, double> neuronToLowerCoeff;
+        Map<NeuronIndex, double> neuronToUpperCoeff;
         for ( unsigned j = 0; j < neuronCount; ++j )
         {
             int flag = ( i >> j ) % 2;
             if ( flag > 0 )
             {
-                for ( const auto &pair : lowerBoundTightenings[j]._neuronToCoefficient )
+                for ( const auto &pair : lowerTightenings[j]._neuronToCoefficient )
                 {
-                    neuronToLowerCoefficient[pair.first] = pair.second;
+                    neuronToLowerCoeff[pair.first] = pair.second;
                 }
-                for ( const auto &pair : upperBoundTightenings[j]._neuronToCoefficient )
+                for ( const auto &pair : upperTightenings[j]._neuronToCoefficient )
                 {
-                    neuronToUpperCoefficient[pair.first] = pair.second;
+                    neuronToUpperCoeff[pair.first] = pair.second;
                }
            }
        }
-        PolygonalTightening lowerTightening( neuronToLowerCoefficient, 0, PolygonalTightening::UB );
-        PolygonalTightening upperTightening( neuronToLowerCoefficient, 0, PolygonalTightening::LB );
+        PolygonalTightening lowerTightening( neuronToLowerCoeff, 0, PolygonalTightening::UB );
+        PolygonalTightening upperTightening( neuronToUpperCoeff, 0, PolygonalTightening::LB );
         tightenings.append( lowerTightening );
         tightenings.append( upperTightening );
     }
@@ -1620,20 +1599,18 @@ const Vector<PolygonalTightening> NetworkLevelReasoner::generatePolygonalTighten
     Vector<PolygonalTightening> tightenings = Vector<PolygonalTightening>( {} );
     for ( const auto &pair : _layerIndexToLayer )
     {
-        unsigned layerIndex = pair.first;
         Layer *layer = pair.second;
-        const Vector<unsigned> &nonFixedNeurons = getNonFixedNeurons( layer );
-        for ( const unsigned neuron : nonFixedNeurons )
+        const Vector<NeuronIndex> nonFixedNeurons = getNonFixedNeurons( layer );
+        for ( const auto &index : nonFixedNeurons )
         {
-            NeuronIndex index( layerIndex, neuron );
-            Map<NeuronIndex, double> neuronToLowerCoefficient = Map<NeuronIndex, double>( {} );
-            Map<NeuronIndex, double> neuronToUpperCoefficient = Map<NeuronIndex, double>( {} );
-            neuronToLowerCoefficient[index] = 1;
-            neuronToUpperCoefficient[index] = 1;
+            Map<NeuronIndex, double> neuronToLowerCoeff;
+            Map<NeuronIndex, double> neuronToUpperCoeff;
+            neuronToLowerCoeff[index] = 1;
+            neuronToUpperCoeff[index] = 1;
             PolygonalTightening lowerTightening(
-                neuronToLowerCoefficient, layer->getUb( neuron ), PolygonalTightening::UB );
+                neuronToLowerCoeff, layer->getUb( index._neuron ), PolygonalTightening::UB );
             PolygonalTightening upperTightening(
-                neuronToLowerCoefficient, layer->getLb( neuron ), PolygonalTightening::LB );
+                neuronToUpperCoeff, layer->getLb( index._neuron ), PolygonalTightening::LB );
             tightenings.append( lowerTightening );
             tightenings.append( upperTightening );
         }
@@ -1645,112 +1622,41 @@ const Vector<PolygonalTightening> NetworkLevelReasoner::generatePolygonalTighten
 const Vector<NeuronIndex> NetworkLevelReasoner::selectConstraints()
 {
-    if ( Options::get()->getMILPSolverBoundTighteningType() ==
-         MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM )
-    {
-        return selectConstraintsForPMNRRandom();
-    }
-    else if ( Options::get()->getMILPSolverBoundTighteningType() ==
-              MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT )
-    {
-        return selectConstraintsForPMNRGradient();
-    }
-    else if ( Options::get()->getMILPSolverBoundTighteningType() ==
-              MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS )
-    {
-        return selectConstraintsForPMNRBBPS();
-    }
-
-    const Vector<NeuronIndex> vect( GlobalConfiguration::PMNR_SELECTED_NEURONS );
-    return vect;
-}
-
-const Vector<NeuronIndex> NetworkLevelReasoner::selectConstraintsForPMNRRandom()
-{
-    unsigned neuronCount = GlobalConfiguration::PMNR_SELECTED_NEURONS;
-    Vector<NeuronIndex> neuronVector = Vector<NeuronIndex>( neuronCount );
-
-    const Vector<unsigned> &candidateLayers = getLayersWithNonFixedNeurons();
-    std::mt19937_64 rng( GlobalConfiguration::PMNR_RANDOM_SEED );
-    std::uniform_int_distribution<unsigned> disLayer( 0, candidateLayers.size() - 1 );
-    unsigned entry = disLayer( rng );
-    unsigned index =
candidateLayers[entry]; - - Layer *layer = _layerIndexToLayer[index]; - const Vector &candidateNeurons = getNonFixedNeurons( layer ); - std::vector candidateNeuronsVector = candidateNeurons.getContainer(); - std::shuffle( candidateNeuronsVector.begin(), candidateNeuronsVector.end(), rng ); - for ( unsigned i = 0; i < neuronCount; ++i ) - { - unsigned neuron = candidateNeurons[i]; - NeuronIndex idx = NeuronIndex( index, neuron ); - neuronVector[i] = idx; - } - - const Vector vect( neuronVector ); - return vect; -} - -const Vector NetworkLevelReasoner::selectConstraintsForPMNRGradient() -{ - unsigned neuronCount = GlobalConfiguration::PMNR_SELECTED_NEURONS; - unsigned outputLayerSize = _layerIndexToLayer[getNumberOfLayers() - 1]->getSize(); - Vector neuronVector = Vector( neuronCount ); double maxScore = 0; unsigned maxScoreIndex = 0; - Map neuronIndexToScore; for ( const auto &pair : _layerIndexToLayer ) { - double score = 0; - unsigned index = pair.first; + double layerScore = 0; + unsigned layerIndex = pair.first; Layer *layer = pair.second; - unsigned layerSize = layer->getSize(); - - if ( getNonFixedNeurons( layer ).size() == 0 ) + const Vector nonFixedNeurons = getNonFixedNeurons( layer ); + for ( const auto &index : nonFixedNeurons ) { - continue; + double neuronScore = getPMNRScore( index ); + layerScore += neuronScore; } - for ( unsigned i = 0; i < layerSize; ++i ) + if ( layerScore > maxScore ) { - if ( isNeuronNonFixed( layer, i ) ) - { - double neuronScore = 0; - for ( unsigned j = 0; i < outputLayerSize; ++j ) - { - neuronScore += - std::pow( ( getOutputLayerSymbolicLb( index )[j * outputLayerSize + i] + - getOutputLayerSymbolicUb( index )[j * outputLayerSize + i] ) / - 2.0, - 2 ); - } - neuronIndexToScore.insert( NeuronIndex( index, i ), neuronScore ); - score += neuronScore; - } - } - - if ( score > maxScore ) - { - maxScore = score; - maxScoreIndex = index; + maxScore = layerScore; + maxScoreIndex = layerIndex; } } Layer *layer = _layerIndexToLayer[maxScoreIndex]; - unsigned layerSize = layer->getSize(); std::priority_queue, std::vector>, std::less>> maxQueue; - for ( unsigned i = 0; i < layerSize; ++i ) + const Vector nonFixedNeurons = getNonFixedNeurons( layer ); + for ( const auto &index : nonFixedNeurons ) { - if ( isNeuronNonFixed( layer, i ) ) - { - double neuronScore = neuronIndexToScore[NeuronIndex( maxScoreIndex, i )]; - maxQueue.push( std::pair( neuronScore, i ) ); - } + maxQueue.push( std::pair( getPMNRScore( index ), index._neuron ) ); } + unsigned neuronCount = + std::min( GlobalConfiguration::PMNR_SELECTED_NEURONS, nonFixedNeurons.size() ); + Vector neuronVector = Vector( neuronCount ); for ( unsigned i = 0; i < neuronCount; ++i ) { unsigned neuron = maxQueue.top().second; @@ -1763,98 +1669,82 @@ const Vector NetworkLevelReasoner::selectConstraintsForPMNRGradient return vect; } -const Vector NetworkLevelReasoner::selectConstraintsForPMNRBBPS() +void NetworkLevelReasoner::initializePMNRScoreMap() { - unsigned neuronCount = GlobalConfiguration::PMNR_SELECTED_NEURONS; - Vector neuronVector = Vector( neuronCount ); - - double maxScore = 0; - unsigned maxScoreIndex = 0; - Map neuronIndexToScore; + _neuronToPMNRScores.clear(); for ( const auto &pair : _layerIndexToLayer ) { - double score = 0; - unsigned index = pair.first; Layer *layer = pair.second; - unsigned layerSize = layer->getSize(); - - if ( getNonFixedNeurons( layer ).size() == 0 ) - { - continue; - } - - for ( unsigned i = 0; i < layerSize; ++i ) - { - if ( isNeuronNonFixed( layer, i ) ) - { - double 
neuronScore = getBBPSScore( NeuronIndex( index, i ) ); - neuronIndexToScore.insert( NeuronIndex( index, i ), neuronScore ); - score += neuronScore; - } - } - - if ( score > maxScore ) + const Vector nonFixedNeurons = getNonFixedNeurons( layer ); + for ( const auto &index : nonFixedNeurons ) { - maxScore = score; - maxScoreIndex = index; + _neuronToPMNRScores.insert( index, calculateNeuronPMNRScore( index ) ); } } +} - Layer *layer = _layerIndexToLayer[maxScoreIndex]; - unsigned layerSize = layer->getSize(); - std::priority_queue, - std::vector>, - std::less>> - maxQueue; - for ( unsigned i = 0; i < layerSize; ++i ) +double NetworkLevelReasoner::calculateNeuronPMNRScore( NeuronIndex index ) const +{ + ASSERT( isNeuronNonFixed( index ) ); + if ( Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM ) { - if ( isNeuronNonFixed( layer, i ) ) - { - double neuronScore = neuronIndexToScore[NeuronIndex( maxScoreIndex, i )]; - maxQueue.push( std::pair( neuronScore, i ) ); - } + return 0; + } + else if ( Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT ) + { + return calculatePMNRGradientScore( index ); + } + else if ( Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS ) + { + return calculatePMNRBBPSScore( index ); } + return 0; +} - for ( unsigned i = 0; i < neuronCount; ++i ) +double NetworkLevelReasoner::calculatePMNRGradientScore( NeuronIndex index ) const +{ + double score = 0; + unsigned neuron = index._neuron; + unsigned layerIndex = index._layer; + unsigned outputLayerSize = _layerIndexToLayer[getNumberOfLayers() - 1]->getSize(); + for ( unsigned j = 0; j < outputLayerSize; ++j ) { - unsigned neuron = maxQueue.top().second; - NeuronIndex idx = NeuronIndex( maxScoreIndex, neuron ); - neuronVector[i] = idx; - maxQueue.pop(); + score += std::pow( ( getOutputSymbolicLb( layerIndex )[j * outputLayerSize + neuron] + + getOutputSymbolicUb( layerIndex )[j * outputLayerSize + neuron] ) / + 2.0, + 2 ); } + return score; +} - const Vector vect( neuronVector ); - return vect; +double NetworkLevelReasoner::calculatePMNRBBPSScore( NeuronIndex index ) const +{ + return index._neuron * 3.5; // ... 
} -void NetworkLevelReasoner::initializeBBPSMaps() +void NetworkLevelReasoner::initializeBBPSBranchingMap() { + _neuronToBBPSBranchingPoints.clear(); for ( const auto &pair : _layerIndexToLayer ) { - unsigned index = pair.first; Layer *layer = pair.second; - unsigned layerSize = layer->getSize(); - - if ( getNonFixedNeurons( layer ).size() == 0 ) + const Vector nonFixedNeurons = getNonFixedNeurons( layer ); + for ( const auto &index : nonFixedNeurons ) { - continue; - } - - for ( unsigned i = 0; i < layerSize; ++i ) - { - if ( isNeuronNonFixed( layer, i ) ) - { - double score = 0; - _neuronToBBPSScores.insert( NeuronIndex( index, i ), score ); - } + _neuronToBBPSBranchingPoints.insert( index, calculateBranchingPoint( index ) ); } } } -Map NetworkLevelReasoner::getBranchingPoint( Layer *layer, - unsigned neuron ) const +Map NetworkLevelReasoner::calculateBranchingPoint( NeuronIndex index ) const { - ASSERT( isNeuronNonFixed( layer, neuron ) ); + ASSERT( isNeuronNonFixed( index ) ); + unsigned layerIndex = index._layer; + unsigned neuron = index._neuron; + Layer *layer = _layerIndexToLayer[layerIndex]; Map point; double lb = layer->getLb( neuron ); @@ -1873,7 +1763,7 @@ Map NetworkLevelReasoner::getBranchingPoint( Layer *layer, branchingPoint = branchingPoint; - return point; + return point; // ... } const Map> @@ -1929,7 +1819,7 @@ const Vector NetworkLevelReasoner::getLayersWithNonFixedNeurons() cons { unsigned layerIndex = pair.first; Layer *layer = pair.second; - const Vector &nonFixedNeurons = getNonFixedNeurons( layer ); + const Vector nonFixedNeurons = getNonFixedNeurons( layer ); if ( nonFixedNeurons.size() > 0 ) { layerWithNonFixedNeurons.append( layerIndex ); @@ -1939,23 +1829,28 @@ const Vector NetworkLevelReasoner::getLayersWithNonFixedNeurons() cons return layerList; } -const Vector NetworkLevelReasoner::getNonFixedNeurons( Layer *layer ) const +const Vector NetworkLevelReasoner::getNonFixedNeurons( const Layer *layer ) const { - Vector nonFixedNeurons = Vector( {} ); + Vector nonFixedNeurons = Vector( {} ); unsigned size = layer->getSize(); + unsigned layerIndex = layer->getLayerIndex(); for ( unsigned i = 0; i < size; ++i ) { - if ( isNeuronNonFixed( layer, i ) ) + NeuronIndex index( layerIndex, i ); + if ( isNeuronNonFixed( index ) ) { - nonFixedNeurons.append( i ); + nonFixedNeurons.append( index ); } } - const Vector neuronList = Vector( nonFixedNeurons ); + const Vector neuronList = Vector( nonFixedNeurons ); return neuronList; } -bool NetworkLevelReasoner::isNeuronNonFixed( Layer *layer, unsigned neuron ) const +bool NetworkLevelReasoner::isNeuronNonFixed( NeuronIndex index ) const { + unsigned layerIndex = index._layer; + unsigned neuron = index._neuron; + Layer *layer = _layerIndexToLayer[layerIndex]; if ( layer->neuronEliminated( neuron ) ) { return false; @@ -2062,11 +1957,11 @@ bool NetworkLevelReasoner::isNeuronNonFixed( Layer *layer, unsigned neuron ) con sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); } - unsigned index = 0; + unsigned i = 0; for ( const auto &sourceIndex : sources ) { if ( handledInputNeurons.exists( sourceIndex._neuron ) ) - ++index; + ++i; else { handledInputNeurons.insert( sourceIndex._neuron ); @@ -2074,10 +1969,10 @@ bool NetworkLevelReasoner::isNeuronNonFixed( Layer *layer, unsigned neuron ) con } } - double lb = std::max( layer->getLb( neuron ), - Layer::linearLowerBound( sourceLbs, sourceUbs, index ) ); - double ub = std::max( layer->getUb( neuron ), - Layer::linearUpperBound( sourceLbs, sourceUbs, 
index ) ); + double lb = + std::max( layer->getLb( neuron ), Layer::linearLowerBound( sourceLbs, sourceUbs, i ) ); + double ub = + std::max( layer->getUb( neuron ), Layer::linearUpperBound( sourceLbs, sourceUbs, i ) ); nonFixed = !FloatUtils::areEqual( lb, ub ); break; } @@ -2121,15 +2016,15 @@ bool NetworkLevelReasoner::isNeuronNonFixed( Layer *layer, unsigned neuron ) con void NetworkLevelReasoner::initializeSymbolicBoundsMaps( const Vector &coeffs ) { // Clear the previous symbolic bound maps. - _outputLayerSymbolicLb.clear(); - _outputLayerSymbolicUb.clear(); - _outputLayerSymbolicLowerBias.clear(); - _outputLayerSymbolicUpperBias.clear(); + _outputSymbolicLb.clear(); + _outputSymbolicUb.clear(); + _outputSymbolicLowerBias.clear(); + _outputSymbolicUpperBias.clear(); - _symbolicLbInTermsOfPredecessor.clear(); - _symbolicUbInTermsOfPredecessor.clear(); - _symbolicLowerBiasInTermsOfPredecessor.clear(); - _symbolicUpperBiasInTermsOfPredecessor.clear(); + _predecessorSymbolicLb.clear(); + _predecessorSymbolicUb.clear(); + _predecessorSymbolicLowerBias.clear(); + _predecessorSymbolicUpperBias.clear(); // Temporarily add weighted sum layer to the NLR of the same size of the output layer. Layer *outputLayer = _layerIndexToLayer[getNumberOfLayers() - 1]; @@ -2149,27 +2044,25 @@ void NetworkLevelReasoner::initializeSymbolicBoundsMaps( const Vector &c } // Initialize maps with zero vectors. - unsigned maxLayerSize = getMaxLayerSize(); + unsigned maxSize = getMaxLayerSize(); for ( const auto &pair : _layerIndexToLayer ) { unsigned layerIndex = pair.first; Layer *layer = pair.second; - unsigned layerSize = layer->getSize(); + unsigned size = layer->getSize(); Layer::Type layerType = layer->getLayerType(); - _outputLayerSymbolicLb[layerIndex] = Vector( outputLayerSize * layerSize, 0 ); - _outputLayerSymbolicUb[layerIndex] = Vector( outputLayerSize * layerSize, 0 ); - _outputLayerSymbolicLowerBias[layerIndex] = Vector( outputLayerSize, 0 ); - _outputLayerSymbolicUpperBias[layerIndex] = Vector( outputLayerSize, 0 ); + _outputSymbolicLb[layerIndex] = Vector( outputLayerSize * size, 0 ); + _outputSymbolicUb[layerIndex] = Vector( outputLayerSize * size, 0 ); + _outputSymbolicLowerBias[layerIndex] = Vector( outputLayerSize, 0 ); + _outputSymbolicUpperBias[layerIndex] = Vector( outputLayerSize, 0 ); if ( layerType != Layer::WEIGHTED_SUM && layerType != Layer::INPUT ) { - _symbolicLbInTermsOfPredecessor[layerIndex] = - Vector( layerSize * maxLayerSize, 0 ); - _symbolicUbInTermsOfPredecessor[layerIndex] = - Vector( layerSize * maxLayerSize, 0 ); - _symbolicLowerBiasInTermsOfPredecessor[layerIndex] = Vector( layerSize, 0 ); - _symbolicUpperBiasInTermsOfPredecessor[layerIndex] = Vector( layerSize, 0 ); + _predecessorSymbolicLb[layerIndex] = Vector( size * maxSize, 0 ); + _predecessorSymbolicUb[layerIndex] = Vector( size * maxSize, 0 ); + _predecessorSymbolicLowerBias[layerIndex] = Vector( size, 0 ); + _predecessorSymbolicUpperBias[layerIndex] = Vector( size, 0 ); } } @@ -2181,20 +2074,20 @@ void NetworkLevelReasoner::initializeSymbolicBoundsMaps( const Vector &c layerIndicesToParameters = getParametersForLayers( coeffs ); } - _deepPolyAnalysis = std::unique_ptr( - new DeepPolyAnalysis( this, - true, - true, - useParameterisedSBT, - &layerIndicesToParameters, - &_outputLayerSymbolicLb, - &_outputLayerSymbolicUb, - &_outputLayerSymbolicLowerBias, - &_outputLayerSymbolicUpperBias, - &_symbolicLbInTermsOfPredecessor, - &_symbolicUbInTermsOfPredecessor, - &_symbolicLowerBiasInTermsOfPredecessor, - 
&_symbolicUpperBiasInTermsOfPredecessor ) ); + _deepPolyAnalysis = + std::unique_ptr( new DeepPolyAnalysis( this, + true, + true, + useParameterisedSBT, + &layerIndicesToParameters, + &_outputSymbolicLb, + &_outputSymbolicUb, + &_outputSymbolicLowerBias, + &_outputSymbolicUpperBias, + &_predecessorSymbolicLb, + &_predecessorSymbolicUb, + &_predecessorSymbolicLowerBias, + &_predecessorSymbolicUpperBias ) ); _deepPolyAnalysis->run(); _deepPolyAnalysis = nullptr; diff --git a/src/nlr/NetworkLevelReasoner.h b/src/nlr/NetworkLevelReasoner.h index 2b4c0dbb1a..90568b213a 100644 --- a/src/nlr/NetworkLevelReasoner.h +++ b/src/nlr/NetworkLevelReasoner.h @@ -210,18 +210,18 @@ class NetworkLevelReasoner : public LayerOwner */ double getPreviousBias( const ReluConstraint *reluConstraint ) const; - Vector getOutputLayerSymbolicLb( unsigned layerIndex ) const; - Vector getOutputLayerSymbolicUb( unsigned layerIndex ) const; - Vector getOutputLayerSymbolicLowerBias( unsigned layerIndex ) const; - Vector getOutputLayerSymbolicUpperBias( unsigned layerIndex ) const; + Vector getOutputSymbolicLb( unsigned layerIndex ) const; + Vector getOutputSymbolicUb( unsigned layerIndex ) const; + Vector getOutputSymbolicLowerBias( unsigned layerIndex ) const; + Vector getOutputSymbolicUpperBias( unsigned layerIndex ) const; - Vector getSymbolicLbInTermsOfPredecessor( unsigned layerIndex ) const; - Vector getSymbolicUbInTermsOfPredecessor( unsigned layerIndex ) const; - Vector getSymbolicLowerBiasInTermsOfPredecessor( unsigned layerIndex ) const; - Vector getSymbolicUpperBiasInTermsOfPredecessor( unsigned layerIndex ) const; + Vector getPredecessorSymbolicLb( unsigned layerIndex ) const; + Vector getPredecessorSymbolicUb( unsigned layerIndex ) const; + Vector getPredecessorSymbolicLowerBias( unsigned layerIndex ) const; + Vector getPredecessorSymbolicUpperBias( unsigned layerIndex ) const; Map getBBPSBranchingPoint( NeuronIndex index ) const; - double getBBPSScore( NeuronIndex index ) const; + double getPMNRScore( NeuronIndex index ) const; /* Finds logically consecutive WS layers and merges them, in order @@ -312,6 +312,11 @@ class NetworkLevelReasoner : public LayerOwner // Heuristically select neurons and polygonal tightenings for PMNR. const Vector selectConstraints(); + + + double getPMNRGradientScore( NeuronIndex index ) const; + double getPMNRBBPSScore( NeuronIndex index ) const; + const Vector selectConstraintsForPMNRRandom(); const Vector selectConstraintsForPMNRGradient(); const Vector selectConstraintsForPMNRBBPS(); @@ -333,9 +338,9 @@ class NetworkLevelReasoner : public LayerOwner // Get all indices of active non-weighted sum layers for INVPROP. 
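The private declarations that follow complete the switch from ( Layer *, unsigned ) pairs to NeuronIndex keys. For reference, a minimal shape consistent with the _layer/_neuron accesses made throughout this patch; the constructor and the ordering operator are assumptions (plausible requirements for use as a Map key), not confirmed by this diff:

    struct NeuronIndex
    {
        unsigned _layer;
        unsigned _neuron;

        NeuronIndex( unsigned layer, unsigned neuron )
            : _layer( layer )
            , _neuron( neuron )
        {
        }

        // Lexicographic order, so that NeuronIndex can key a sorted map.
        bool operator<( const NeuronIndex &other ) const
        {
            return _layer < other._layer ||
                   ( _layer == other._layer && _neuron < other._neuron );
        }
    };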
    const Vector<unsigned> getLayersWithNonFixedNeurons() const;
-    const Vector<unsigned> getNonFixedNeurons( Layer *layer ) const;
+    const Vector<NeuronIndex> getNonFixedNeurons( const Layer *layer ) const;
 
-    bool isNeuronNonFixed( Layer *layer, unsigned neuron ) const;
+    bool isNeuronNonFixed( NeuronIndex index ) const;
 
     /*
       Store previous biases for each ReLU neuron in a map for getPreviousBias()
@@ -344,9 +349,13 @@ class NetworkLevelReasoner : public LayerOwner
     Map<const ReluConstraint *, double> _previousBiases;
     void initializePreviousBiasMap();
 
-    void initializeBBPSMaps();
+    void initializePMNRScoreMap();
+    void initializeBBPSBranchingMap();
 
-    Map getBranchingPoint( Layer *layer, unsigned neuron ) const;
+    double calculateNeuronPMNRScore( NeuronIndex index ) const;
+    double calculatePMNRGradientScore( NeuronIndex index ) const;
+    double calculatePMNRBBPSScore( NeuronIndex index ) const;
+    Map calculateBranchingPoint( NeuronIndex index ) const;
 
     /*
       If the NLR is manipulated manually in order to generate a new
@@ -356,17 +365,17 @@ class NetworkLevelReasoner : public LayerOwner
     void reindexNeurons();
 
     Map<NeuronIndex, Map> _neuronToBBPSBranchingPoints;
-    Map<NeuronIndex, double> _neuronToBBPSScores;
+    Map<NeuronIndex, double> _neuronToPMNRScores;
 
-    Map<unsigned, Vector<double>> _outputLayerSymbolicLb;
-    Map<unsigned, Vector<double>> _outputLayerSymbolicUb;
-    Map<unsigned, Vector<double>> _outputLayerSymbolicLowerBias;
-    Map<unsigned, Vector<double>> _outputLayerSymbolicUpperBias;
+    Map<unsigned, Vector<double>> _outputSymbolicLb;
+    Map<unsigned, Vector<double>> _outputSymbolicUb;
+    Map<unsigned, Vector<double>> _outputSymbolicLowerBias;
+    Map<unsigned, Vector<double>> _outputSymbolicUpperBias;
 
-    Map<unsigned, Vector<double>> _symbolicLbInTermsOfPredecessor;
-    Map<unsigned, Vector<double>> _symbolicUbInTermsOfPredecessor;
-    Map<unsigned, Vector<double>> _symbolicLowerBiasInTermsOfPredecessor;
-    Map<unsigned, Vector<double>> _symbolicUpperBiasInTermsOfPredecessor;
+    Map<unsigned, Vector<double>> _predecessorSymbolicLb;
+    Map<unsigned, Vector<double>> _predecessorSymbolicUb;
+    Map<unsigned, Vector<double>> _predecessorSymbolicLowerBias;
+    Map<unsigned, Vector<double>> _predecessorSymbolicUpperBias;
 };
 
 } // namespace NLR
diff --git a/src/nlr/tests/Test_PMNR.h b/src/nlr/tests/Test_PMNR.h
new file mode 100644
index 0000000000..ce4e723758
--- /dev/null
+++ b/src/nlr/tests/Test_PMNR.h
@@ -0,0 +1,11387 @@
+/*********************                                                        */
+/*! \file Test_PMNR.h
+ ** \verbatim
+ ** Top contributors (to current version):
+ **   Guy Katz, Andrew Wu
+ ** This file is part of the Marabou project.
+ ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS
+ ** in the top-level source directory) and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** [[ Add lengthier description here ]]
+
+**/
+
+#include "../../engine/tests/MockTableau.h" // TODO: fix this
+#include "DeepPolySoftmaxElement.h"
+#include "FloatUtils.h"
+#include "Layer.h"
+#include "NetworkLevelReasoner.h"
+#include "Options.h"
+#include "Query.h"
+#include "Tightening.h"
+#include "Vector.h"
+
+#include <cxxtest/TestSuite.h>
+
+class MockForNetworkLevelReasoner
+{
+public:
+};
+
+class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
+{
+public:
+    MockForNetworkLevelReasoner *mock;
+
+    void setUp()
+    {
+        TS_ASSERT( mock = new MockForNetworkLevelReasoner );
+    }
+
+    void tearDown()
+    {
+        TS_ASSERT_THROWS_NOTHING( delete mock );
+    }
+
+    void populateNetworkBackwardReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
+    {
+        /*
+
+                1      R      -1      R      -1        2
+          x0 --- x2 ---> x4 --- x6 ---> x8 --- x10
+            \   /          \   /          \   /
+           1 \ /          2 \ /          1 \ /
+              \/             \/             \/
+              /\             /\             /\
+          -1 / \           1 / \          -1 / \
+            /   \   R       /   \   R       /   \
+          x1 --- x3 ---> x5 --- x7 ---> x9 --- x11
+                1             -1             2
+
+          The example described in Fig.
2 of + https://dl.acm.org/doi/10.1145/3563325 + using ReLU activation instead of LeakyReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardReLU2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = ReLU( x3 ) + x8 = ReLU( x4 ) + x9 = ReLU( x5 ) + x10 = ReLU( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = ReLU( x11 ) + x15 = ReLU( x12 ) + x16 = ReLU( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = ReLU( x17 ) + x20 = ReLU( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, 
NLR::Layer::RELU, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::RELU, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + 
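+ // x7..x10, x14..x16 and x19..x20 are ReLU outputs, so their true lower bounds are 0; the loose placeholder bounds are expected to be tightened by the first propagation pass.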
tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardSigmoid( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 S -1 S -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ S / \ S / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 2 of + https://dl.acm.org/doi/10.1145/3563325 + using Sigmoid activation instead of LeakyReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the Sigmoid sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + 
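+ // initialize() sizes the bound manager to the full variable count ( 12 here ); the inputs x0 and x1 stay unbounded because each test sets its own input box before propagating.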
tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardSigmoid2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = Sigmoid( x3 ) + x8 = Sigmoid( x4 ) + x9 = Sigmoid( x5 ) + x10 = Sigmoid( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = Sigmoid( x11 ) + x15 = Sigmoid( x12 ) + x16 = Sigmoid( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = Sigmoid( x17 ) + x20 = Sigmoid( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::SIGMOID, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::SIGMOID, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the Sigmoid sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( 
NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardSign( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 Sign -1 Sign -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ Sign / \ Sign / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 
2 of + https://dl.acm.org/doi/10.1145/3563325 + using Sign activation instead of LeakyReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardSign2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = Sign( x3 ) + x8 = Sign( x4 ) + x9 = Sign( x5 ) + x10 = Sign( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = Sign( x11 ) + x15 = Sign( x12 ) + x16 = Sign( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = Sign( x17 ) + x20 = Sign( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2,
NLR::Layer::SIGN, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::SIGN, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + 
tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardRound( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 Rnd -1 Rnd -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ Rnd / \ Rnd / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 2 of + https://dl.acm.org/doi/10.1145/3563325 + using Round activation instead of LeakyReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ROUND, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the Round sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); +
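+ // Round maps each source neuron to the nearest integer, so x4, x5, x8 and x9 can only take integral values; the placeholder bounds below are deliberately loose.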
tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardRound2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = Round( x3 ) + x8 = Round( x4 ) + x9 = Round( x5 ) + x10 = Round( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = Round( x11 ) + x15 = Round( x12 ) + x16 = Round( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = Round( x17 ) + x20 = Round( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::ROUND, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::ROUND, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::ROUND, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the Round sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); 
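+ // Variables are numbered consecutively layer by layer in this fixture, so the indices below line up with the x0..x21 names used in the equations above.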
+ + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardAbs( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 A -1 A -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ A / \ A / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 
2 of + https://dl.acm.org/doi/10.1145/3563325 + using Absolute value activation instead of LeakyReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardAbs2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = Abs( x3 ) + x8 = Abs( x4 ) + x9 = Abs( x5 ) + x10 = Abs( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = Abs( x11 ) + x15 = Abs( x12 ) + x16 = Abs( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = Abs( x17 ) + x20 = Abs( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + 
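+ // Each ABSOLUTE_VALUE layer mirrors the size of the weighted-sum layer feeding it ( 4, 3 and 2 neurons respectively ).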
nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, 
-large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardLeakyReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 LR -1 LR -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ LR / \ LR / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 2 of + https://dl.acm.org/doi/10.1145/3563325 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + nlr.getLayer( 4 )->setAlpha( 0.1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the LeakyReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for 
neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardLeakyRelu2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = LeakyReLU( x3 ) + x8 = LeakyReLU( x4 ) + x9 = LeakyReLU( x5 ) + x10 = LeakyReLU( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = LeakyReLU( x11 ) + x15 = LeakyReLU( x12 ) + x16 = LeakyReLU( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = LeakyReLU( x17 ) + x20 = LeakyReLU( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + nlr.getLayer( 4 )->setAlpha( 0.1 ); + nlr.getLayer( 6 )->setAlpha( 0.1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the LeakyReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + 
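+ // LeakyReLU is element-wise, so each activation neuron has exactly one source; the pair below wires layer 5 into layer 6.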
nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) + { + /* + a a' + x e + b b' f h + y d + c c' + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::MAX, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the 
weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Softmax/Max sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + + void populateNetworkBackwardSoftmaxAndMax2( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 + x1 x12 x15 x17 + x5 x9 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7, x8, x9, x10 = Softmax( x2, x3, x4, x5 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14, x15, x16 = Softmax( x11, x12, x13 ) + + x17 = Max( x14, x15, x16 ) + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 5, NLR::Layer::MAX, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); 
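+ // setWeight( sourceLayer, sourceNeuron, targetLayer, targetNeuron, weight ); pairs that are never set keep the default weight of 0, which is why x2 does not appear in the expression for x3 above.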
+ nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + + // Mark the Softmax/Max sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 3, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 3, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 2 ); + nlr.addActivationSource( 1, 0, 2, 3 ); + nlr.addActivationSource( 1, 1, 2, 3 ); + nlr.addActivationSource( 1, 2, 2, 3 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + nlr.addActivationSource( 3, 2, 4, 0 ); + nlr.addActivationSource( 3, 0, 4, 1 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 1 ); + nlr.addActivationSource( 3, 0, 4, 2 ); + nlr.addActivationSource( 3, 1, 4, 2 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 4, 0, 5, 0 ); + nlr.addActivationSource( 4, 1, 5, 0 ); + nlr.addActivationSource( 4, 2, 5, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 18 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 
10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + } + + void populateNetworkBackwardReluAndBilinear( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) + { + /* + a a' + x e + b b' f h + y d + c c' + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the ReLU/Bilinear sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardReluAndBilinear2( 
NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) + { + /* + x3 x7 + x0 + x4 x8 x11 x13 + x1 x15 + x5 x9 x12 x14 + x2 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = ReLU( x3 ) + x8 = ReLU( x4 ) + x9 = ReLU( x5 ) + x10 = ReLU( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + + x13 = ReLU( x11 ) + x14 = ReLU( x12 ) + + x15 = x13 * x14 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::RELU, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::BILINEAR, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setBias( 3, 0, 2 ); + + // Mark the ReLU/Bilinear sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + nlr.addActivationSource( 4, 0, 5, 0 ); + nlr.addActivationSource( 4, 1, 5, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 13 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 14 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 15 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 16 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 
11, large );
+        tableau.setLowerBound( 12, -large );
+        tableau.setUpperBound( 12, large );
+        tableau.setLowerBound( 13, -large );
+        tableau.setUpperBound( 13, large );
+        tableau.setLowerBound( 14, -large );
+        tableau.setUpperBound( 14, large );
+        tableau.setLowerBound( 15, -large );
+        tableau.setUpperBound( 15, large );
+    }
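+
+    /*
+      Editor's note -- an illustrative sketch, not part of the original test
+      suite: a plain evaluation of the equations documented in
+      populateNetworkBackwardReluAndBilinear2 above, handy for spot-checking
+      expected bounds by hand. For example, ( x0, x1, x2 ) = ( 1, 0, 0 )
+      gives x13 = 0 and x14 = 5, hence x15 = 0.
+    */
+    static double evalReluAndBilinear2( double x0, double x1, double x2 )
+    {
+        double x3 = -x0 + x1;
+        double x4 = x0 + 2 * x1;
+        double x5 = x0 + x1 + x2;
+        double x6 = 3 * x0 - 2 * x1 - x2;
+
+        double x7 = x3 > 0 ? x3 : 0; // ReLU
+        double x8 = x4 > 0 ? x4 : 0;
+        double x9 = x5 > 0 ? x5 : 0;
+        double x10 = x6 > 0 ? x6 : 0;
+
+        double x11 = 2 * x7 + x9 - x10 + 2;
+        double x12 = -x7 + 2 * x8 + x10;
+
+        double x13 = x11 > 0 ? x11 : 0;
+        double x14 = x12 > 0 ? x12 : 0;
+
+        return x13 * x14; // bilinear output x15
+    }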
+
+    void test_invprop_relu()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkBackwardReLU( nlr, tableau );
+
+        tableau.setLowerBound( 0, 0 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, 0 );
+        tableau.setUpperBound( 1, 1 );
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
+            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+
+            Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+
+            Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
+            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
+
+            Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ),
+            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
+
+            Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ),
+            Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+
+        // Invoke Invprop
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds2( {
+            Tightening( 8, 0, Tightening::LB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+
+        // Change the current bounds
+        tableau.setLowerBound( 0, -3 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 2 );
+
+        double large = 1000000;
+        tableau.setLowerBound( 2, -large );
+        tableau.setUpperBound( 2, large );
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+        tableau.setLowerBound( 7, -large );
+        tableau.setUpperBound( 7, large );
+        tableau.setLowerBound( 8, -large );
+        tableau.setUpperBound( 8, large );
+        tableau.setLowerBound( 9, -large );
+        tableau.setUpperBound( 9, large );
+        tableau.setLowerBound( 10, -large );
+        tableau.setUpperBound( 10, large );
+        tableau.setLowerBound( 11, -large );
+        tableau.setUpperBound( 11, large );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds3( {
+            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
+
+            Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
+
+            Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ),
+            Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ),
+
+            Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
+            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ),
+
+            Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ),
+            Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
+
+
+        // Invoke Invprop
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds4( {
+            Tightening( 8, 0, Tightening::LB ),
+            Tightening( 9, 0, Tightening::LB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+    }
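+
+    /*
+      Editor's note -- a hedged sketch, not part of the original suite: for an
+      unfixed neuron y = ReLU( x ) with input bounds l < 0 < u, the DeepPoly
+      relaxation exercised above keeps the secant y <= u * ( x - l ) / ( u - l )
+      as the symbolic upper bound and a line through the origin as the lower
+      bound; the concrete output interval is simply [ max( l, 0 ), max( u, 0 ) ],
+      e.g. x in [ -1, 1 ] gives ReLU( x ) in [ 0, 1 ].
+    */
+    static void reluConcreteBounds( double l, double u, double &lb, double &ub )
+    {
+        // ReLU is monotone, so interval endpoints map straight through
+        lb = l > 0 ? l : 0;
+        ub = u > 0 ? u : 0;
+    }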
+
+    void test_invprop_relu2()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkBackwardReLU2( nlr, tableau );
+
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 1 );
+        tableau.setLowerBound( 2, -1 );
+        tableau.setUpperBound( 2, 1 );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds( {
+            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
+            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
+            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
+
+            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
+            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
+            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
+            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
+
+            Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ),
+            Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ),
+            Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ),
+
+            Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ),
+            Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ),
+            Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ),
+
+            Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ),
+            Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ),
+
+            Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ),
+            Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ),
+
+            Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+
+        // Invoke Invprop
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds2( {
+            Tightening( 14, 0, Tightening::LB ),
+            Tightening( 15, 0, Tightening::LB ),
+            Tightening( 16, 0, Tightening::LB ),
+
+            Tightening( 19, 0, Tightening::LB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+
+        // Change the current bounds
+        tableau.setLowerBound( 0, -3 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 2 );
+        tableau.setLowerBound( 2, -2 );
+        tableau.setUpperBound( 2, 2 );
+
+        double large = 1000000;
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+        tableau.setLowerBound( 7, -large );
+        tableau.setUpperBound( 7, large );
+        tableau.setLowerBound( 8, -large );
+        tableau.setUpperBound( 8, large );
+        tableau.setLowerBound( 9, -large );
+        tableau.setUpperBound( 9, large );
+        tableau.setLowerBound( 10, -large );
+        tableau.setUpperBound( 10, large );
+        tableau.setLowerBound( 11, -large );
+        tableau.setUpperBound( 11, large );
+        tableau.setLowerBound( 12, -large );
+        tableau.setUpperBound( 12, large );
+        tableau.setLowerBound( 13, -large );
+        tableau.setUpperBound( 13, large );
+        tableau.setLowerBound( 14, -large );
+        tableau.setUpperBound( 14, large );
+        tableau.setLowerBound( 15, -large );
+        tableau.setUpperBound( 15, large );
+        tableau.setLowerBound( 16, -large );
+        tableau.setUpperBound( 16, large );
+        tableau.setLowerBound( 17, -large );
+        tableau.setUpperBound( 17, large );
+        tableau.setLowerBound( 18, -large );
+        tableau.setUpperBound( 18, large );
+        tableau.setLowerBound( 19, -large );
+        tableau.setUpperBound( 19, large );
+        tableau.setLowerBound( 20, -large );
+        tableau.setUpperBound( 20, large );
+        tableau.setLowerBound( 21, -large );
+        tableau.setUpperBound( 21, large );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds3( {
+            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
+            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
+            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
+            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
+
+            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
+            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
+            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
+            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
+
+            Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ),
+            Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ),
+            Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ),
+
+            Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ),
+            Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ),
+            Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ),
+
+            Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ),
+            Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ),
+
+            Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ),
+            Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ),
+
+            Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
+
+
+        // Invoke Invprop
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds4( {
+            Tightening( 7, 0, Tightening::LB ),
+
+            Tightening( 14, 0, Tightening::LB ),
+            Tightening( 15, 0, Tightening::LB ),
+            Tightening( 16, 0, Tightening::LB ),
+
+            Tightening( 20, 0, Tightening::LB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+    }
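+
+    /*
+      Editor's note -- a hedged sketch, not part of the original suite: the
+      invprop passes in the two ReLU tests above recover lower bounds of 0 on
+      post-activation variables ( ReLU outputs are never negative ), and more
+      generally tighten inputs from output bounds. A minimal form of the
+      backward rule for y = ReLU( x ):
+    */
+    static void reluBackwardTighten( double yl, double yu, double &xl, double &xu )
+    {
+        // y <= yu also caps x, since x > yu >= 0 would force y = x > yu
+        if ( yu < xu )
+            xu = yu;
+
+        // yl > 0 means the neuron is active, so x = y >= yl
+        if ( yl > 0 && yl > xl )
+            xl = yl;
+    }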
+
+    void test_invprop_sigmoid()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkBackwardSigmoid( nlr, tableau );
+
+        tableau.setLowerBound( 0, 0 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, 0 );
+        tableau.setUpperBound( 1, 1 );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
+            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+
+            Tightening( 4, 0.2689, Tightening::LB ), Tightening( 4, 0.7311, Tightening::UB ),
+            Tightening( 5, 0.5, Tightening::LB ), Tightening( 5, 0.8808, Tightening::UB ),
+
+            Tightening( 6, -0.1261, Tightening::LB ), Tightening( 6, 0.5069, Tightening::UB ),
+            Tightening( 7, -0.2379, Tightening::LB ), Tightening( 7, 0.8571, Tightening::UB ),
+
+            Tightening( 8, 0.4685, Tightening::LB ), Tightening( 8, 0.6241, Tightening::UB ),
+            Tightening( 9, 0.4408, Tightening::LB ), Tightening( 9, 0.7021, Tightening::UB ),
+
+            Tightening( 10, -1.1819, Tightening::LB ), Tightening( 10, -1.0535, Tightening::UB ),
+            Tightening( 11, 3.4986, Tightening::LB ), Tightening( 11, 3.8797, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+
+        // Invoke Invprop
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds2( {} );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+
+        // Change the current bounds
+        tableau.setLowerBound( 0, -3 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 2 );
+
+        double large = 1000000;
+        tableau.setLowerBound( 2, -large );
+        tableau.setUpperBound( 2, large );
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+        tableau.setLowerBound( 7, -large );
+        tableau.setUpperBound( 7, large );
+        tableau.setLowerBound( 8, -large );
+        tableau.setUpperBound( 8, large );
+        tableau.setLowerBound( 9, -large );
+        tableau.setUpperBound( 9, large );
+        tableau.setLowerBound( 10, -large );
+        tableau.setUpperBound( 10, large );
+        tableau.setLowerBound( 11, -large );
+        tableau.setUpperBound( 11, large );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds3( {
+            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
+
+            Tightening( 4, 0.0066, Tightening::LB ), Tightening( 4, 0.8807, Tightening::UB ),
+            Tightening( 5, 0.0179, Tightening::LB ), Tightening( 5, 0.9526, Tightening::UB ),
+
+            Tightening( 6, -0.8362, Tightening::LB ), Tightening( 6, 0.9193, Tightening::UB ),
+            Tightening( 7, -0.8860, Tightening::LB ), Tightening( 7, 1.6904, Tightening::UB ),
+
+            Tightening( 8, 0.3023, Tightening::LB ), Tightening( 8, 0.7148, Tightening::UB ),
+            Tightening( 9, 0.2919, Tightening::LB ), Tightening( 9, 0.8443, Tightening::UB ),
+
+            Tightening( 10, -1.2694, Tightening::LB ), Tightening( 10, -0.8841, Tightening::UB ),
+            Tightening( 11, 3.2396, Tightening::LB ), Tightening( 11, 4.05, Tightening::UB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
+
+
+        // Invoke Invprop
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds4( {} );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+    }
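+
+    /*
+      Editor's note -- a hedged sketch, not part of the original suite:
+      sigmoid is strictly monotone, so its concrete output interval is the
+      image of the input endpoints; x in [ -1, 1 ] gives [ 0.2689, 0.7311 ],
+      matching the expected bounds in the test above. The symbolic DeepPoly
+      relaxation additionally brackets the curve with secant/tangent lines,
+      which is where the deeper layers' bounds come from. ( Assumes <cmath>
+      is available, e.g. via FloatUtils.h. )
+    */
+    static void sigmoidConcreteBounds( double l, double u, double &lb, double &ub )
+    {
+        lb = 1 / ( 1 + std::exp( -l ) );
+        ub = 1 / ( 1 + std::exp( -u ) );
+    }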
+
+    void test_invprop_sigmoid2()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkBackwardSigmoid2( nlr, tableau );
+
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 1 );
+        tableau.setLowerBound( 2, -1 );
+        tableau.setUpperBound( 2, 1 );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds( {
+            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
+            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
+            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
+
+            Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ),
+            Tightening( 8, 0.0474, Tightening::LB ), Tightening( 8, 0.9526, Tightening::UB ),
+            Tightening( 9, 0.0474, Tightening::LB ), Tightening( 9, 0.9526, Tightening::UB ),
+            Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9975, Tightening::UB ),
+
+            Tightening( 11, 1.3787, Tightening::LB ), Tightening( 11, 4.6213, Tightening::UB ),
+            Tightening( 12, -0.5636, Tightening::LB ), Tightening( 12, 2.5636, Tightening::UB ),
+            Tightening( 13, -2.3771, Tightening::LB ), Tightening( 13, 0.3771, Tightening::UB ),
+
+            Tightening( 14, 0.7988, Tightening::LB ), Tightening( 14, 0.9903, Tightening::UB ),
+            Tightening( 15, 0.3627, Tightening::LB ), Tightening( 15, 0.9285, Tightening::UB ),
+            Tightening( 16, 0.0849, Tightening::LB ), Tightening( 16, 0.5932, Tightening::UB ),
+
+            Tightening( 17, -1.2113, Tightening::LB ), Tightening( 17, 0.0354, Tightening::UB ),
+            Tightening( 18, 1.4027, Tightening::LB ), Tightening( 18, 2.4177, Tightening::UB ),
+
+            Tightening( 19, 0.2295, Tightening::LB ), Tightening( 19, 0.5088, Tightening::UB ),
+            Tightening( 20, 0.8026, Tightening::LB ), Tightening( 20, 0.9182, Tightening::UB ),
+
+            Tightening( 21, -1.6539, Tightening::LB ), Tightening( 21, -1.3393, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+
+        // Invoke Invprop
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds2( {} );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+
+        // Change the current bounds
+        tableau.setLowerBound( 0, -3 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 2 );
+        tableau.setLowerBound( 2, -2 );
+        tableau.setUpperBound( 2, 2 );
+
+        double large = 1000000;
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+        tableau.setLowerBound( 7, -large );
+        tableau.setUpperBound( 7, large );
+        tableau.setLowerBound( 8, -large );
+        tableau.setUpperBound( 8, large );
+        tableau.setLowerBound( 9, -large );
+        tableau.setUpperBound( 9, large );
+        tableau.setLowerBound( 10, -large );
+        tableau.setUpperBound( 10, large );
+        tableau.setLowerBound( 11, -large );
+        tableau.setUpperBound( 11, large );
+        tableau.setLowerBound( 12, -large );
+        tableau.setUpperBound( 12, large );
+        tableau.setLowerBound( 13, -large );
+        tableau.setUpperBound( 13, large );
+        tableau.setLowerBound( 14, -large );
+        tableau.setUpperBound( 14, large );
+        tableau.setLowerBound( 15, -large );
+        tableau.setUpperBound( 15, large );
+        tableau.setLowerBound( 16, -large );
+        tableau.setUpperBound( 16, large );
+        tableau.setLowerBound( 17, -large );
+        tableau.setUpperBound( 17, large );
+        tableau.setLowerBound( 18, -large );
+        tableau.setUpperBound( 18, large );
+        tableau.setLowerBound( 19, -large );
+        tableau.setUpperBound( 19, large );
+        tableau.setLowerBound( 20, -large );
+        tableau.setUpperBound( 20, large );
+        tableau.setLowerBound( 21, -large );
+        tableau.setUpperBound( 21, large );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds3( {
+            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
+            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
+            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
+            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
+
+            Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.9933, Tightening::UB ),
+            Tightening( 8, 0.0067, Tightening::LB ), Tightening( 8, 0.9933, Tightening::UB ),
+            Tightening( 9, 0.0025, Tightening::LB ), Tightening( 9, 0.9933, Tightening::UB ),
+            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9991, Tightening::UB ),
+
+            Tightening( 11, 1.2517, Tightening::LB ), Tightening( 11, 4.9701, Tightening::UB ),
+            Tightening( 12, -0.9599, Tightening::LB ), Tightening( 12, 2.8466, Tightening::UB ),
+            Tightening( 13, -2.8147, Tightening::LB ), Tightening( 13, 0.9188, Tightening::UB ),
+
+            Tightening( 14, 0.7776, Tightening::LB ), Tightening( 14, 0.9931, Tightening::UB ),
+            Tightening( 15, 0.2769, Tightening::LB ), Tightening( 15, 0.9451, Tightening::UB ),
+            Tightening( 16, 0.0565, Tightening::LB ), Tightening( 16, 0.7148, Tightening::UB ),
+
+            Tightening( 17, -1.4307, Tightening::LB ), Tightening( 17, 0.1083, Tightening::UB ),
+            Tightening( 18, 1.2592, Tightening::LB ), Tightening( 18, 2.5519, Tightening::UB ),
+
+            Tightening( 19, 0.1929, Tightening::LB ), Tightening( 19, 0.5270, Tightening::UB ),
+            Tightening( 20, 0.7789, Tightening::LB ), Tightening( 20, 0.9277, Tightening::UB ),
+
+            Tightening( 21, -1.6967, Tightening::LB ), Tightening( 21, -1.3115, Tightening::UB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
+
+
+        // Invoke Invprop
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds4( {} );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+    }
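+
+    /*
+      Editor's note -- a hedged sketch, not part of the original suite: the
+      next two tests exercise ABSOLUTE_VALUE layers, as the sketch below
+      illustrates. An interval that straddles zero collapses its lower bound
+      to 0, e.g. x in [ -5, 2 ] gives |x| in [ 0, 5 ], as in the expected
+      bounds below.
+    */
+    static void absConcreteBounds( double l, double u, double &lb, double &ub )
+    {
+        if ( l >= 0 )
+        {
+            // Positive phase: identity
+            lb = l;
+            ub = u;
+        }
+        else if ( u <= 0 )
+        {
+            // Negative phase: mirrored
+            lb = -u;
+            ub = -l;
+        }
+        else
+        {
+            // Straddles zero: |x| can vanish, and peaks at an endpoint
+            lb = 0;
+            ub = -l > u ? -l : u;
+        }
+    }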
+
+    void test_invprop_abs()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkBackwardAbs( nlr, tableau );
+
+        tableau.setLowerBound( 0, 0 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, 0 );
+        tableau.setUpperBound( 1, 1 );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
+            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+
+            Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+
+            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
+            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
+
+            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ),
+            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ),
+
+            Tightening( 10, -4, Tightening::LB ), Tightening( 10, 0, Tightening::UB ),
+            Tightening( 11, 2, Tightening::LB ), Tightening( 11, 8, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+
+        // Invoke Invprop
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds2( {} );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+
+        // Change the current bounds
+        tableau.setLowerBound( 0, -3 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 2 );
+
+        double large = 1000000;
+        tableau.setLowerBound( 2, -large );
+        tableau.setUpperBound( 2, large );
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+        tableau.setLowerBound( 7, -large );
+        tableau.setUpperBound( 7, large );
+        tableau.setLowerBound( 8, -large );
+        tableau.setUpperBound( 8, large );
+        tableau.setLowerBound( 9, -large );
+        tableau.setUpperBound( 9, large );
+        tableau.setLowerBound( 10, -large );
+        tableau.setUpperBound( 10, large );
+        tableau.setLowerBound( 11, -large );
+        tableau.setUpperBound( 11, large );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds3( {
+            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
+
+            Tightening( 4, 0, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 4, Tightening::UB ),
+
+            Tightening( 6, -5, Tightening::LB ), Tightening( 6, 4, Tightening::UB ),
+            Tightening( 7, -4, Tightening::LB ), Tightening( 7, 10, Tightening::UB ),
+
+            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
+            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 10, Tightening::UB ),
+
+            Tightening( 10, -15, Tightening::LB ), Tightening( 10, 0, Tightening::UB ),
+            Tightening( 11, 2, Tightening::LB ), Tightening( 11, 27, Tightening::UB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
+
+
+        // Invoke Invprop
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds4( {} );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+    }
+
+    void test_invprop_abs2()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkBackwardAbs2( nlr, tableau );
+
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 1 );
+        tableau.setLowerBound( 2, -1 );
+        tableau.setUpperBound( 2, 1 );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds( {
+            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
+            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
+            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
+
+            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
+            Tightening( 8, 
0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 9, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), Tightening( 14, 9, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), + Tightening( 16, 0, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), + + Tightening( 17, -15, Tightening::LB ), Tightening( 17, 12, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 27, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 15, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), + + Tightening( 21, -28, Tightening::LB ), Tightening( 21, 14, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, 
-5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 15, Tightening::UB ), + + Tightening( 11, -13, Tightening::LB ), Tightening( 11, 18, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 25, Tightening::UB ), + Tightening( 13, -7, Tightening::LB ), Tightening( 13, 15, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), Tightening( 14, 18, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), Tightening( 15, 25, Tightening::UB ), + Tightening( 16, 0, Tightening::LB ), Tightening( 16, 15, Tightening::UB ), + + Tightening( 17, -33, Tightening::LB ), Tightening( 17, 25, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 58, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 58, Tightening::UB ), + + Tightening( 21, -59, Tightening::LB ), Tightening( 21, 32, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_invprop_round() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardRound( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -4, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 6.5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( 
nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds2( {
+            Tightening( 11, -4, Tightening::LB ),
+            Tightening( 11, 6, Tightening::UB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+
+        // Change the current bounds
+        tableau.setLowerBound( 0, -3 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 2 );
+
+        double large = 1000000;
+        tableau.setLowerBound( 2, -large );
+        tableau.setUpperBound( 2, large );
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+        tableau.setLowerBound( 7, -large );
+        tableau.setUpperBound( 7, large );
+        tableau.setLowerBound( 8, -large );
+        tableau.setUpperBound( 8, large );
+        tableau.setLowerBound( 9, -large );
+        tableau.setUpperBound( 9, large );
+        tableau.setLowerBound( 10, -large );
+        tableau.setUpperBound( 10, large );
+        tableau.setLowerBound( 11, -large );
+        tableau.setUpperBound( 11, large );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds3( {
+            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
+
+            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
+            Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
+
+            Tightening( 6, -3, Tightening::LB ), Tightening( 6, 5, Tightening::UB ),
+            Tightening( 7, -10.5, Tightening::LB ), Tightening( 7, 5.5, Tightening::UB ),
+
+            Tightening( 8, -3, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
+            Tightening( 9, -10, Tightening::LB ), Tightening( 9, 6, Tightening::UB ),
+
+            Tightening( 10, -3, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
+            Tightening( 11, -15.5, Tightening::LB ), Tightening( 11, 11.5, Tightening::UB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
+
+
+        // Invoke Invprop
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds4( {
+            Tightening( 7, -10, Tightening::LB ),
+            Tightening( 7, 5, Tightening::UB ),
+
+            Tightening( 11, -14, Tightening::LB ),
+            Tightening( 11, 11, Tightening::UB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+    }
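+
+    /*
+      Editor's note -- a hedged sketch, not part of the original suite: Round
+      is monotone, so interval bounds map through its endpoints; in addition
+      its outputs are integral, which is presumably what lets the invprop
+      pass above pull the bounds [ -4.5, 6.5 ] on x11 in to [ -4, 6 ]. A
+      sketch of the concrete rule, assuming round-half-to-even semantics
+      ( std::nearbyint under the default rounding mode, via <cmath> ):
+    */
+    static void roundConcreteBounds( double l, double u, double &lb, double &ub )
+    {
+        lb = std::nearbyint( l );
+        ub = std::nearbyint( u );
+    }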
+
+    void test_invprop_round2()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkBackwardRound2( nlr, tableau );
+
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 1 );
+        tableau.setLowerBound( 2, -1 );
+        tableau.setUpperBound( 2, 1 );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds( {
+            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
+            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
+            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
+
+            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
+            Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
+            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
+            Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
+
+            Tightening( 11, -11, Tightening::LB ), Tightening( 11, 15, Tightening::UB ),
+            Tightening( 12, -10, Tightening::LB ), Tightening( 12, 10, Tightening::UB ),
+            Tightening( 13, -7, Tightening::LB ), Tightening( 13, 3, Tightening::UB ),
+
+            Tightening( 14, -11, Tightening::LB ), Tightening( 14, 15, Tightening::UB ),
+            Tightening( 15, -10, Tightening::LB ), Tightening( 15, 10, Tightening::UB ),
+            Tightening( 16, -7, Tightening::LB ), Tightening( 16, 3, Tightening::UB ),
+
+            Tightening( 17, -27.5, Tightening::LB ), Tightening( 17, 27.5, Tightening::UB ),
+            Tightening( 18, -16.5, Tightening::LB ), Tightening( 18, 16.5, Tightening::UB ),
+
+            Tightening( 19, -28, Tightening::LB ), Tightening( 19, 28, Tightening::UB ),
+            Tightening( 20, -17, Tightening::LB ), Tightening( 20, 17, Tightening::UB ),
+
+            Tightening( 21, -38, Tightening::LB ), Tightening( 21, 36, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+
+        // Invoke Invprop
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds2( {} );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+
+        // Change the current bounds
+        tableau.setLowerBound( 0, -3 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 2 );
+        tableau.setLowerBound( 2, -2 );
+        tableau.setUpperBound( 2, 2 );
+
+        double large = 1000000;
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+        tableau.setLowerBound( 7, -large );
+        tableau.setUpperBound( 7, large );
+        tableau.setLowerBound( 8, -large );
+        tableau.setUpperBound( 8, large );
+        tableau.setLowerBound( 9, -large );
+        tableau.setUpperBound( 9, large );
+        tableau.setLowerBound( 10, -large );
+        tableau.setUpperBound( 10, large );
+        tableau.setLowerBound( 11, -large );
+        tableau.setUpperBound( 11, large );
+        tableau.setLowerBound( 12, -large );
+        tableau.setUpperBound( 12, large );
+        tableau.setLowerBound( 13, -large );
+        tableau.setUpperBound( 13, large );
+        tableau.setLowerBound( 14, -large );
+        tableau.setUpperBound( 14, large );
+        tableau.setLowerBound( 15, -large );
+        tableau.setUpperBound( 15, large );
+        tableau.setLowerBound( 16, -large );
+        tableau.setUpperBound( 16, large );
+        tableau.setLowerBound( 17, -large );
+        tableau.setUpperBound( 17, large );
+        tableau.setLowerBound( 18, -large );
+        tableau.setUpperBound( 18, large );
+
tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -13, Tightening::LB ), Tightening( 11, 30, Tightening::UB ), + Tightening( 12, -23, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), + + Tightening( 14, -13, Tightening::LB ), Tightening( 14, 30, Tightening::UB ), + Tightening( 15, -23, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), + Tightening( 16, -9, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), + + Tightening( 17, -57.5, Tightening::LB ), Tightening( 17, 32.5, Tightening::UB ), + Tightening( 18, -23.5, Tightening::LB ), Tightening( 18, 26.5, Tightening::UB ), + + Tightening( 19, -58, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), + Tightening( 20, -24, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), + + Tightening( 21, -74, Tightening::LB ), Tightening( 21, 44, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_invprop_sign() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB 
), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_invprop_sign2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + 
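/*
+          Editor's aside ( hedged illustration, not part of the original
+          test ): Sign maps any input to +1 for x >= 0 and to -1 otherwise,
+          so the concrete interval rule only inspects the endpoint signs,
+          which is why most bounds in this test collapse to [ -1, 1 ] or to
+          a point. A minimal local check of that rule:
+        */
+        {
+            double l = -2, u = 3; // hypothetical source bounds
+            double lb = ( l >= 0 ) ? 1 : -1;
+            double ub = ( u >= 0 ) ? 1 : -1;
+            TS_ASSERT( lb == -1 && ub == 1 );
+        }
+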
tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, 
-large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_invprop_leaky_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, 
Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 4, -0.1, Tightening::LB ), + + Tightening( 8, -0.045, Tightening::LB ), + Tightening( 9, -0.3, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), + Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + + Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), + Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + + Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), + Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 4, -0.5, Tightening::LB ), + Tightening( 5, -0.4, Tightening::LB ), + + Tightening( 8, -0.4571, Tightening::LB ), + Tightening( 9, -1.1057, 
Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_invprop_leaky_relu2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyRelu2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ), + Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ), + Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ), + Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ), + Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ), + + Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ), + Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ), + + Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ), + Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ), + + Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 7, -0.2, Tightening::LB ), + Tightening( 8, -0.3, Tightening::LB ), + Tightening( 9, -0.3, Tightening::LB ), + Tightening( 10, -0.6, Tightening::LB ), + + Tightening( 14, -0.9, Tightening::LB ), + Tightening( 15, -0.89, Tightening::LB ), + Tightening( 16, -0.77, Tightening::LB ), + + Tightening( 19, -2.3133, Tightening::LB ), + Tightening( 20, -1.2, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + 
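+ // Widen all the non-input variables back to [ -large, large ] before the
+ // next pass, so that only the new input bounds (and not stale tightenings
+ // from the previous pass) constrain the reasoner.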
double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ), + Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ), + Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ), + Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ), + Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ), + + Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ), + Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ), + + Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ), + Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ), + + Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + 
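+ // Note: each lower bound expected below appears to be the corresponding
+ // pre-activation lower bound from expectedBounds3 scaled by the leaky-ReLU
+ // slope ( 0.1, judging by the values, e.g. -2 -> -0.2 for variable 7 );
+ // i.e., Invprop recovers exactly the leaky-ReLU lower bounds here.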
List expectedBounds4( { + Tightening( 7, -0.2, Tightening::LB ), + Tightening( 8, -0.5, Tightening::LB ), + Tightening( 9, -0.6, Tightening::LB ), + Tightening( 10, -1.5, Tightening::LB ), + + Tightening( 14, -1.1, Tightening::LB ), + Tightening( 15, -2.1771, Tightening::LB ), + Tightening( 16, -1.15, Tightening::LB ), + + Tightening( 19, -5.6259, Tightening::LB ), + Tightening( 20, -1.9, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_invprop_softmax_and_max() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSoftmaxAndMax( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 0.2119, Tightening::LB ), Tightening( 3, 0.8756, Tightening::UB ), + Tightening( 5, 0.0049, Tightening::LB ), Tightening( 5, 0.6652, Tightening::UB ), + Tightening( 7, 0.0634, Tightening::LB ), Tightening( 7, 0.4955, Tightening::UB ), + + Tightening( 8, -0.1870, Tightening::LB ), Tightening( 8, 1.4488, Tightening::UB ), + Tightening( 9, 0.6814, Tightening::LB ), Tightening( 9, 2.2432, Tightening::UB ), + + Tightening( 10, 0.6814, Tightening::LB ), Tightening( 10, 2.2432, Tightening::UB ), + + Tightening( 11, -2.2432, Tightening::LB ), Tightening( 11, -0.6814, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, 
large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + + Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 9, 0.2194, Tightening::LB ), Tightening( 9, 2.9934, Tightening::UB ), + + Tightening( 10, 0.2194, Tightening::LB ), Tightening( 10, 2.9934, Tightening::UB ), + + Tightening( 11, -2.9934, Tightening::LB ), Tightening( 11, -0.2194, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_invprop_softmax_and_max2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSoftmaxAndMax2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0.0003, Tightening::LB ), Tightening( 7, 0.9864, Tightening::UB ), + Tightening( 8, 0.0001, Tightening::LB ), Tightening( 8, 0.9907, Tightening::UB ), + Tightening( 9, 0.0001, Tightening::LB ), Tightening( 9, 0.9907, Tightening::UB ), + Tightening( 10, 0.0001, Tightening::LB ), Tightening( 10, 0.9994, Tightening::UB ), + + Tightening( 11, 1.0013, Tightening::LB ), Tightening( 11, 4.9634, Tightening::UB ), + Tightening( 12, -0.9861, Tightening::LB ), Tightening( 12, 2.9152, Tightening::UB ), + Tightening( 13, -2.9868, Tightening::LB ), Tightening( 13, 0.9678, Tightening::UB ), + + Tightening( 14, 0.1143, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ), + Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8694, Tightening::UB ), + Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4596, Tightening::UB ), + + Tightening( 17, 0.1143, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ), + 
} ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0.0001, Tightening::LB ), Tightening( 7, 0.9999, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 0.9991, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 0.9990, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9999, Tightening::UB ), + + Tightening( 11, 1.0003, Tightening::LB ), Tightening( 11, 4.9326, Tightening::UB ), + Tightening( 12, -0.9999, Tightening::LB ), Tightening( 12, 2.9979, Tightening::UB ), + Tightening( 13, -2.9990, Tightening::LB ), Tightening( 13, 0.9980, Tightening::UB ), + + Tightening( 14, 0.1067, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ), + Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8786, Tightening::UB ), + Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4677, Tightening::UB ), + + Tightening( 17, 0.1067, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + 
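+ // Write the DeepPoly tightenings back into the tableau before re-running
+ // the reasoner, so that the backward pass starts from the tightened
+ // bounds rather than from the raw ones.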
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List<Tightening> expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_invprop_relu_and_bilinear() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReluAndBilinear( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List<Tightening> expectedBounds( { + Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ), + + Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ), + + Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ), + } ); + + List<Tightening> bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List<Tightening> expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List<Tightening> expectedBounds3( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ),
Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), + Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), + + Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), + + Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_invprop_relu_and_bilinear2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReluAndBilinear2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ), + Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ), + + Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + 
tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ), + Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ), + + Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke Invprop + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_random_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-random" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + 
Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ), + Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 8, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ), + + Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + 
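+ // With MILP_SOLVER_BOUND_TIGHTENING_TYPE set to "backward-pmnr-random",
+ // lpRelaxationPropagation presumably refines the relaxation around a
+ // randomly selected set of neurons; the test only pins down the resulting
+ // tightenings, not which neurons were chosen.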
TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 8, 0, Tightening::LB ), + Tightening( 9, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_random_relu2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-random" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ), + + Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ), + Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ), + + Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ), + + Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ), + + Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + + Tightening( 19, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + 
tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ), + Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ), + Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ), + + Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ), + Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ), + Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ), + + Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + + Tightening( 14, 0, Tightening::LB ), + 
Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + + Tightening( 20, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_random_sigmoid() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-random" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSigmoid( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0.2689, Tightening::LB ), Tightening( 4, 0.7311, Tightening::UB ), + Tightening( 5, 0.5, Tightening::LB ), Tightening( 5, 0.8808, Tightening::UB ), + + Tightening( 6, -0.1261, Tightening::LB ), Tightening( 6, 0.5069, Tightening::UB ), + Tightening( 7, -0.2379, Tightening::LB ), Tightening( 7, 0.8571, Tightening::UB ), + + Tightening( 8, 0.4685, Tightening::LB ), Tightening( 8, 0.6241, Tightening::UB ), + Tightening( 9, 0.4408, Tightening::LB ), Tightening( 9, 0.7021, Tightening::UB ), + + Tightening( 10, -1.1819, Tightening::LB ), Tightening( 10, -1.0535, Tightening::UB ), + Tightening( 11, 3.4986, Tightening::LB ), Tightening( 11, 3.8797, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), 
+ + Tightening( 4, 0.0066, Tightening::LB ), Tightening( 4, 0.8807, Tightening::UB ), + Tightening( 5, 0.0179, Tightening::LB ), Tightening( 5, 0.9526, Tightening::UB ), + + Tightening( 6, -0.8362, Tightening::LB ), Tightening( 6, 0.9193, Tightening::UB ), + Tightening( 7, -0.8860, Tightening::LB ), Tightening( 7, 1.6904, Tightening::UB ), + + Tightening( 8, 0.3023, Tightening::LB ), Tightening( 8, 0.7148, Tightening::UB ), + Tightening( 9, 0.2919, Tightening::LB ), Tightening( 9, 0.8443, Tightening::UB ), + + Tightening( 10, -1.2694, Tightening::LB ), Tightening( 10, -0.8841, Tightening::UB ), + Tightening( 11, 3.2396, Tightening::LB ), Tightening( 11, 4.05, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_random_sigmoid2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-random" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSigmoid2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), + Tightening( 8, 0.0474, Tightening::LB ), Tightening( 8, 0.9526, Tightening::UB ), + Tightening( 9, 0.0474, Tightening::LB ), Tightening( 9, 0.9526, Tightening::UB ), + Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9975, Tightening::UB ), + + Tightening( 11, 1.3787, Tightening::LB ), Tightening( 11, 4.6213, Tightening::UB ), + Tightening( 12, -0.5636, Tightening::LB ), Tightening( 12, 2.5636, Tightening::UB ), + Tightening( 13, -2.3771, Tightening::LB ), Tightening( 13, 0.3771, Tightening::UB ), + + Tightening( 14, 0.7988, Tightening::LB ), Tightening( 14, 0.9903, Tightening::UB ), + Tightening( 15, 0.3627, Tightening::LB ), Tightening( 15, 0.9285, Tightening::UB ), + Tightening( 16, 0.0849, Tightening::LB ), Tightening( 16, 0.5932, Tightening::UB ), + + Tightening( 17, -1.2113, Tightening::LB ), Tightening( 17, 0.0354, Tightening::UB ), + Tightening( 18, 1.4027, Tightening::LB ), Tightening( 18, 2.4177, Tightening::UB ), + + Tightening( 19, 0.2295, Tightening::LB ), Tightening( 19, 0.5088, Tightening::UB ), + Tightening( 20, 0.8026, Tightening::LB ), Tightening( 20, 0.9182, Tightening::UB ), + + Tightening( 21, -1.6539, Tightening::LB ), Tightening( 21, -1.3393, Tightening::UB ), + } ); + 
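+ // Sanity note: every bound expected above for the sigmoid layers
+ // ( variables 7-10, 14-16 and 19-20, judging by their ranges ) lies
+ // strictly inside ( 0, 1 ), as sigmoid outputs must.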
+ List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.9933, Tightening::UB ), + Tightening( 8, 0.0067, Tightening::LB ), Tightening( 8, 0.9933, Tightening::UB ), + Tightening( 9, 0.0025, Tightening::LB ), Tightening( 9, 0.9933, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9991, Tightening::UB ), + + Tightening( 11, 1.2517, Tightening::LB ), Tightening( 11, 4.9701, Tightening::UB ), + Tightening( 12, -0.9599, Tightening::LB ), Tightening( 12, 2.8466, Tightening::UB ), + Tightening( 13, -2.8147, Tightening::LB ), Tightening( 13, 0.9188, Tightening::UB ), + + Tightening( 14, 0.7776, Tightening::LB ), Tightening( 14, 0.9931, Tightening::UB ), + Tightening( 15, 0.2769, Tightening::LB ), Tightening( 15, 0.9451, Tightening::UB ), + Tightening( 16, 0.0565, Tightening::LB ), Tightening( 16, 0.7148, 
Tightening::UB ), + + Tightening( 17, -1.4307, Tightening::LB ), Tightening( 17, 0.1083, Tightening::UB ), + Tightening( 18, 1.2592, Tightening::LB ), Tightening( 18, 2.5519, Tightening::UB ), + + Tightening( 19, 0.1929, Tightening::LB ), Tightening( 19, 0.5270, Tightening::UB ), + Tightening( 20, 0.7789, Tightening::LB ), Tightening( 20, 0.9277, Tightening::UB ), + + Tightening( 21, -1.6967, Tightening::LB ), Tightening( 21, -1.3115, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_random_abs() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-random" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardAbs( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, -4, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 2, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + 
tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 4, Tightening::UB ), + + Tightening( 6, -5, Tightening::LB ), Tightening( 6, 4, Tightening::UB ), + Tightening( 7, -4, Tightening::LB ), Tightening( 7, 10, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 10, Tightening::UB ), + + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 2, Tightening::LB ), Tightening( 11, 27, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_random_abs2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-random" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardAbs2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 9, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), Tightening( 14, 9, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), + Tightening( 16, 0, Tightening::LB ), 
Tightening( 16, 6, Tightening::UB ), + + Tightening( 17, -15, Tightening::LB ), Tightening( 17, 12, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 27, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 15, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), + + Tightening( 21, -28, Tightening::LB ), Tightening( 21, 14, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 15, Tightening::UB ), + + Tightening( 11, -13, Tightening::LB ), Tightening( 11, 18, Tightening::UB ), + Tightening( 12, 
-5, Tightening::LB ), Tightening( 12, 25, Tightening::UB ), + Tightening( 13, -7, Tightening::LB ), Tightening( 13, 15, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), Tightening( 14, 18, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), Tightening( 15, 25, Tightening::UB ), + Tightening( 16, 0, Tightening::LB ), Tightening( 16, 15, Tightening::UB ), + + Tightening( 17, -33, Tightening::LB ), Tightening( 17, 25, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 58, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 58, Tightening::UB ), + + Tightening( 21, -59, Tightening::LB ), Tightening( 21, 32, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_random_round() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-random" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardRound( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -4, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 6.5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 11, -4, Tightening::LB ), + Tightening( 11, 6, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + 
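+        // A sketch of the reset below in loop form (illustrative only; the
+        // tests spell it out statement by statement): every non-input
+        // variable gets the sentinel box [-large, large], so tightenings
+        // left over from the previous query cannot constrain the re-run on
+        // the widened inputs.
+        //
+        //   for ( unsigned v = 2; v <= 11; ++v )
+        //   {
+        //       tableau.setLowerBound( v, -large );
+        //       tableau.setUpperBound( v, large );
+        //   }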
tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -3, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, -10.5, Tightening::LB ), Tightening( 7, 5.5, Tightening::UB ), + + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -10, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + + Tightening( 10, -3, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + Tightening( 11, -15.5, Tightening::LB ), Tightening( 11, 11.5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, -10, Tightening::LB ), + Tightening( 7, 5, Tightening::UB ), + + Tightening( 11, -14, Tightening::LB ), + Tightening( 11, 11, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_random_round2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-random" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardRound2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, 
Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 15, Tightening::UB ), + Tightening( 12, -10, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -7, Tightening::LB ), Tightening( 13, 3, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 15, Tightening::UB ), + Tightening( 15, -10, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -7, Tightening::LB ), Tightening( 16, 3, Tightening::UB ), + + Tightening( 17, -27.5, Tightening::LB ), Tightening( 17, 27.5, Tightening::UB ), + Tightening( 18, -16.5, Tightening::LB ), Tightening( 18, 16.5, Tightening::UB ), + + Tightening( 19, -28, Tightening::LB ), Tightening( 19, 28, Tightening::UB ), + Tightening( 20, -17, Tightening::LB ), Tightening( 20, 17, Tightening::UB ), + + Tightening( 21, -38, Tightening::LB ), Tightening( 21, 36, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + 
Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -13, Tightening::LB ), Tightening( 11, 30, Tightening::UB ), + Tightening( 12, -23, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), + + Tightening( 14, -13, Tightening::LB ), Tightening( 14, 30, Tightening::UB ), + Tightening( 15, -23, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), + Tightening( 16, -9, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), + + Tightening( 17, -57.5, Tightening::LB ), Tightening( 17, 32.5, Tightening::UB ), + Tightening( 18, -23.5, Tightening::LB ), Tightening( 18, 26.5, Tightening::UB ), + + Tightening( 19, -58, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), + Tightening( 20, -24, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), + + Tightening( 21, -74, Tightening::LB ), Tightening( 21, 44, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_random_sign() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-random" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with random neuron selection + 
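+        // Same three-step pattern as in the tests above: updateTableau()
+        // (a helper presumably defined elsewhere in this suite) writes the
+        // tightenings just obtained back into the mock tableau,
+        // obtainCurrentBounds() reloads them into the reasoner, and
+        // lpRelaxationPropagation() runs the LP-relaxation pass; with
+        // "backward-pmnr-random" the neurons whose relaxations get refined
+        // are presumably chosen at random. For this Round network the pass
+        // is expected to add nothing beyond DeepPoly (empty list below).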
TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_random_sign2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-random" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + 
Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, 
large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_random_leaky_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-random" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, 
Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 4, -0.1, Tightening::LB ), + + Tightening( 8, -0.045, Tightening::LB ), + Tightening( 9, -0.3, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), + Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + + Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), + Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + + Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), + Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 4, -0.5, Tightening::LB ), + Tightening( 5, -0.4, Tightening::LB ), + + Tightening( 8, -0.4571, Tightening::LB ), + Tightening( 9, -1.1057, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_random_leaky_relu2() + { + Options::get()->setString( 
Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-random" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyRelu2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ), + Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ), + Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ), + Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ), + Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ), + + Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ), + Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ), + + Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ), + Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ), + + Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 7, -0.2, Tightening::LB ), + Tightening( 8, -0.3, Tightening::LB ), + Tightening( 9, -0.3, Tightening::LB ), + Tightening( 10, -0.6, Tightening::LB ), + + Tightening( 14, -0.9, Tightening::LB ), + Tightening( 15, -0.89, Tightening::LB ), + Tightening( 16, -0.77, Tightening::LB ), + + Tightening( 19, -2.3133, Tightening::LB ), + Tightening( 20, -1.2, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + 
tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ), + Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ), + Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ), + Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ), + Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ), + + Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ), + Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ), + + Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ), + Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ), + + Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, -0.2, Tightening::LB ), + Tightening( 8, -0.5, Tightening::LB ), + Tightening( 9, -0.6, Tightening::LB ), + 
+            Tightening( 10, -1.5, Tightening::LB ),
+
+            Tightening( 14, -1.1, Tightening::LB ),
+            Tightening( 15, -2.1771, Tightening::LB ),
+            Tightening( 16, -1.15, Tightening::LB ),
+
+            Tightening( 19, -5.6259, Tightening::LB ),
+            Tightening( 20, -1.9, Tightening::LB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+    }
+
+    void test_pmnr_random_softmax_and_max()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+                                   "backward-pmnr-random" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkBackwardSoftmaxAndMax( nlr, tableau );
+
+        tableau.setLowerBound( 0, 0 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, 0 );
+        tableau.setUpperBound( 1, 1 );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List expectedBounds( {
+            Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
+            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
+
+            Tightening( 3, 0.2119, Tightening::LB ), Tightening( 3, 0.8756, Tightening::UB ),
+            Tightening( 5, 0.0049, Tightening::LB ), Tightening( 5, 0.6652, Tightening::UB ),
+            Tightening( 7, 0.0634, Tightening::LB ), Tightening( 7, 0.4955, Tightening::UB ),
+
+            Tightening( 8, -0.1870, Tightening::LB ), Tightening( 8, 1.4488, Tightening::UB ),
+            Tightening( 9, 0.6814, Tightening::LB ), Tightening( 9, 2.2432, Tightening::UB ),
+
+            Tightening( 10, 0.6814, Tightening::LB ), Tightening( 10, 2.2432, Tightening::UB ),
+
+            Tightening( 11, -2.2432, Tightening::LB ), Tightening( 11, -0.6814, Tightening::UB ),
+        } );
+
+        List bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+
+        // Invoke PMNR with random neuron selection
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List expectedBounds2( {} );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+
+        // Change the current bounds
+        tableau.setLowerBound( 0, -3 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 2 );
+
+        double large = 1000000;
+        tableau.setLowerBound( 2, -large );
+        tableau.setUpperBound( 2, large );
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+        tableau.setLowerBound( 7, -large );
+        tableau.setUpperBound( 7, large );
+        tableau.setLowerBound( 8, -large );
+        tableau.setUpperBound( 8, large );
+        tableau.setLowerBound( 9, -large );
+        tableau.setUpperBound( 9, large );
+        tableau.setLowerBound( 10, -large );
+        tableau.setUpperBound( 10, large );
+        tableau.setLowerBound( 11, -large );
+        tableau.setUpperBound( 11, large );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
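+        // Under SOFTMAX_BOUND_TYPE "lse" the softmax layer is relaxed with
+        // log-sum-exp style bounds. However wide the input box becomes,
+        // softmax outputs a probability vector, so each softmax output
+        // stays inside [0, 1]; the expected bounds for variables 3, 5 and
+        // 7 below reflect exactly that.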
TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + + Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 9, 0.2194, Tightening::LB ), Tightening( 9, 2.9934, Tightening::UB ), + + Tightening( 10, 0.2194, Tightening::LB ), Tightening( 10, 2.9934, Tightening::UB ), + + Tightening( 11, -2.9934, Tightening::LB ), Tightening( 11, -0.2194, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_random_softmax_and_max2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-random" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSoftmaxAndMax2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0.0003, Tightening::LB ), Tightening( 7, 0.9864, Tightening::UB ), + Tightening( 8, 0.0001, Tightening::LB ), Tightening( 8, 0.9907, Tightening::UB ), + Tightening( 9, 0.0001, Tightening::LB ), Tightening( 9, 0.9907, Tightening::UB ), + Tightening( 10, 0.0001, Tightening::LB ), Tightening( 10, 0.9994, Tightening::UB ), + + Tightening( 11, 1.0013, Tightening::LB ), Tightening( 11, 4.9634, Tightening::UB ), + Tightening( 12, -0.9861, Tightening::LB ), Tightening( 12, 2.9152, Tightening::UB ), + Tightening( 13, -2.9868, Tightening::LB ), Tightening( 13, 0.9678, Tightening::UB ), + + Tightening( 14, 0.1143, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ), + Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8694, Tightening::UB ), + Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4596, Tightening::UB ), + + Tightening( 17, 0.1143, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( 
nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0.0001, Tightening::LB ), Tightening( 7, 0.9999, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 0.9991, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 0.9990, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9999, Tightening::UB ), + + Tightening( 11, 1.0003, Tightening::LB ), Tightening( 11, 4.9326, Tightening::UB ), + Tightening( 12, -0.9999, Tightening::LB ), Tightening( 12, 2.9979, Tightening::UB ), + Tightening( 13, -2.9990, Tightening::LB ), Tightening( 13, 0.9980, Tightening::UB ), + + Tightening( 14, 0.1067, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ), + Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8786, Tightening::UB ), + Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4677, Tightening::UB ), + + Tightening( 17, 0.1067, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + 
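+        // As in the first invocation, the LP pass below is expected to
+        // find no tightening beyond DeepPoly for this softmax/max network:
+        // the expected list is empty.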
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List expectedBounds4( {} );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+    }
+
+    void test_pmnr_random_relu_and_bilinear()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+                                   "backward-pmnr-random" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkBackwardReluAndBilinear( nlr, tableau );
+
+        tableau.setLowerBound( 0, 0 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, 0 );
+        tableau.setUpperBound( 1, 1 );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List expectedBounds( {
+            Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
+            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
+
+            Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
+
+            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ),
+            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ),
+
+            Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ),
+
+            Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ),
+        } );
+
+        List bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+
+        // Invoke PMNR with random neuron selection
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List expectedBounds2( {} );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+
+        // Change the current bounds
+        tableau.setLowerBound( 0, -3 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 2 );
+
+        double large = 1000000;
+        tableau.setLowerBound( 2, -large );
+        tableau.setUpperBound( 2, large );
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+        tableau.setLowerBound( 7, -large );
+        tableau.setUpperBound( 7, large );
+        tableau.setLowerBound( 8, -large );
+        tableau.setUpperBound( 8, large );
+        tableau.setLowerBound( 9, -large );
+        tableau.setUpperBound( 9, large );
+        tableau.setLowerBound( 10, -large );
+        tableau.setUpperBound( 10, large );
+        tableau.setLowerBound( 11, -large );
+        tableau.setUpperBound( 11, large );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List expectedBounds3( {
+            Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
+            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
+
Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), + Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), + + Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), + + Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_random_relu_and_bilinear2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-random" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReluAndBilinear2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ), + Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ), + + Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + 
tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ), + Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ), + + Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with random neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + + void test_pmnr_gradient_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke DeepPoly + 
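+        // DeepPoly runs first, exactly as in the random-selection tests.
+        // The "backward-pmnr-gradient" option differs only in neuron
+        // selection for the subsequent LP pass: candidates are presumably
+        // ranked by a gradient-based estimate of their influence on the
+        // output, rather than sampled at random.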
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+ List<Tightening> expectedBounds( {
+ Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
+ Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+
+ Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
+ Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+
+ Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
+ Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
+
+ Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ),
+ Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
+
+ Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ),
+ Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ),
+ } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+
+ // Invoke PMNR with gradient heuristic for neuron selection
+ TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+ List<Tightening> expectedBounds2( {
+ Tightening( 8, 0, Tightening::LB ),
+ } );
+
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+
+ // Change the current bounds
+ tableau.setLowerBound( 0, -3 );
+ tableau.setUpperBound( 0, 1 );
+ tableau.setLowerBound( 1, -1 );
+ tableau.setUpperBound( 1, 2 );
+
+ double large = 1000000;
+ tableau.setLowerBound( 2, -large );
+ tableau.setUpperBound( 2, large );
+ tableau.setLowerBound( 3, -large );
+ tableau.setUpperBound( 3, large );
+ tableau.setLowerBound( 4, -large );
+ tableau.setUpperBound( 4, large );
+ tableau.setLowerBound( 5, -large );
+ tableau.setUpperBound( 5, large );
+ tableau.setLowerBound( 6, -large );
+ tableau.setUpperBound( 6, large );
+ tableau.setLowerBound( 7, -large );
+ tableau.setUpperBound( 7, large );
+ tableau.setLowerBound( 8, -large );
+ tableau.setUpperBound( 8, large );
+ tableau.setLowerBound( 9, -large );
+ tableau.setUpperBound( 9, large );
+ tableau.setLowerBound( 10, -large );
+ tableau.setUpperBound( 10, large );
+ tableau.setLowerBound( 11, -large );
+ tableau.setUpperBound( 11, large );
+
+
+ // Invoke DeepPoly
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+ List<Tightening> expectedBounds3( {
+ Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+ Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
+
+ Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
+ Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
+
+ Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ),
+ Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ),
+
+ Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
+ Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ),
+
+ Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ),
+ Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ),
+ } );
+
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
+
+
+ // Invoke PMNR with gradient heuristic for neuron selection
+ TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+ List<Tightening> expectedBounds4( {
+ Tightening( 8, 0, Tightening::LB ),
+ Tightening( 9, 0, Tightening::LB ),
+ } );
+
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+ }
+
+ void test_pmnr_gradient_relu2()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+ Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+ "backward-pmnr-gradient" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkBackwardReLU2( nlr, tableau );
+
+ tableau.setLowerBound( 0, -1 );
+ tableau.setUpperBound( 0, 1 );
+ tableau.setLowerBound( 1, -1 );
+ tableau.setUpperBound( 1, 1 );
+ tableau.setLowerBound( 2, -1 );
+ tableau.setUpperBound( 2, 1 );
+
+
+ // Invoke DeepPoly
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+ List<Tightening> expectedBounds( {
+ Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+ Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
+ Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
+ Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
+
+ Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
+ Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
+ Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
+ Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
+
+ Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ),
+ Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ),
+ Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ),
+
+ Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ),
+ Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ),
+ Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ),
+
+ Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ),
+ Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ),
+
+ Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ),
+ Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ),
+
+ Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ),
+ } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+
+ // Invoke PMNR with gradient heuristic for neuron selection
+ TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+ List<Tightening> expectedBounds2( {
+ Tightening( 14, 0, Tightening::LB ),
+ Tightening( 15, 0, Tightening::LB ),
+ Tightening( 16, 0, Tightening::LB ),
+
+ Tightening( 19, 0, Tightening::LB ),
+ } );
+
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+
+ // Change the current bounds
+ tableau.setLowerBound( 0, -3 );
+ tableau.setUpperBound( 0, 1 );
+ tableau.setLowerBound( 1, -1 );
+ tableau.setUpperBound( 1, 2 );
+
tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ), + Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ), + Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ), + + Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ), + Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ), + Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ), + + Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( 
nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + + Tightening( 20, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_gradient_sigmoid() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSigmoid( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0.2689, Tightening::LB ), Tightening( 4, 0.7311, Tightening::UB ), + Tightening( 5, 0.5, Tightening::LB ), Tightening( 5, 0.8808, Tightening::UB ), + + Tightening( 6, -0.1261, Tightening::LB ), Tightening( 6, 0.5069, Tightening::UB ), + Tightening( 7, -0.2379, Tightening::LB ), Tightening( 7, 0.8571, Tightening::UB ), + + Tightening( 8, 0.4685, Tightening::LB ), Tightening( 8, 0.6241, Tightening::UB ), + Tightening( 9, 0.4408, Tightening::LB ), Tightening( 9, 0.7021, Tightening::UB ), + + Tightening( 10, -1.1819, Tightening::LB ), Tightening( 10, -1.0535, Tightening::UB ), + Tightening( 11, 3.4986, Tightening::LB ), Tightening( 11, 3.8797, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + 
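+ // (All intermediate variables were reset to +/-large above, so this second
+ // DeepPoly pass is constrained only by the widened input box on x0 and x1.)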
TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0.0066, Tightening::LB ), Tightening( 4, 0.8807, Tightening::UB ), + Tightening( 5, 0.0179, Tightening::LB ), Tightening( 5, 0.9526, Tightening::UB ), + + Tightening( 6, -0.8362, Tightening::LB ), Tightening( 6, 0.9193, Tightening::UB ), + Tightening( 7, -0.8860, Tightening::LB ), Tightening( 7, 1.6904, Tightening::UB ), + + Tightening( 8, 0.3023, Tightening::LB ), Tightening( 8, 0.7148, Tightening::UB ), + Tightening( 9, 0.2919, Tightening::LB ), Tightening( 9, 0.8443, Tightening::UB ), + + Tightening( 10, -1.2694, Tightening::LB ), Tightening( 10, -0.8841, Tightening::UB ), + Tightening( 11, 3.2396, Tightening::LB ), Tightening( 11, 4.05, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_gradient_sigmoid2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSigmoid2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), + Tightening( 8, 0.0474, Tightening::LB ), Tightening( 8, 0.9526, Tightening::UB ), + Tightening( 9, 0.0474, Tightening::LB ), Tightening( 9, 0.9526, Tightening::UB ), + Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9975, Tightening::UB ), + + Tightening( 11, 1.3787, Tightening::LB ), Tightening( 11, 4.6213, Tightening::UB ), + Tightening( 12, -0.5636, Tightening::LB ), Tightening( 12, 2.5636, Tightening::UB ), + Tightening( 13, -2.3771, Tightening::LB ), Tightening( 13, 0.3771, Tightening::UB ), + + Tightening( 14, 0.7988, Tightening::LB ), Tightening( 14, 0.9903, Tightening::UB ), + Tightening( 15, 0.3627, Tightening::LB ), Tightening( 15, 0.9285, Tightening::UB ), + Tightening( 16, 0.0849, Tightening::LB ), Tightening( 16, 0.5932, Tightening::UB ), + + Tightening( 17, -1.2113, Tightening::LB ), Tightening( 17, 0.0354, Tightening::UB ), + Tightening( 18, 1.4027, Tightening::LB ), Tightening( 18, 2.4177, Tightening::UB ), + + Tightening( 19, 
0.2295, Tightening::LB ), Tightening( 19, 0.5088, Tightening::UB ), + Tightening( 20, 0.8026, Tightening::LB ), Tightening( 20, 0.9182, Tightening::UB ), + + Tightening( 21, -1.6539, Tightening::LB ), Tightening( 21, -1.3393, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.9933, Tightening::UB ), + Tightening( 8, 0.0067, Tightening::LB ), Tightening( 8, 0.9933, Tightening::UB ), + Tightening( 9, 0.0025, Tightening::LB ), Tightening( 9, 0.9933, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9991, Tightening::UB ), + + Tightening( 11, 1.2517, Tightening::LB ), Tightening( 11, 4.9701, Tightening::UB ), + Tightening( 12, -0.9599, Tightening::LB ), Tightening( 12, 2.8466, Tightening::UB ), + Tightening( 13, -2.8147, Tightening::LB ), Tightening( 13, 
0.9188, Tightening::UB ), + + Tightening( 14, 0.7776, Tightening::LB ), Tightening( 14, 0.9931, Tightening::UB ), + Tightening( 15, 0.2769, Tightening::LB ), Tightening( 15, 0.9451, Tightening::UB ), + Tightening( 16, 0.0565, Tightening::LB ), Tightening( 16, 0.7148, Tightening::UB ), + + Tightening( 17, -1.4307, Tightening::LB ), Tightening( 17, 0.1083, Tightening::UB ), + Tightening( 18, 1.2592, Tightening::LB ), Tightening( 18, 2.5519, Tightening::UB ), + + Tightening( 19, 0.1929, Tightening::LB ), Tightening( 19, 0.5270, Tightening::UB ), + Tightening( 20, 0.7789, Tightening::LB ), Tightening( 20, 0.9277, Tightening::UB ), + + Tightening( 21, -1.6967, Tightening::LB ), Tightening( 21, -1.3115, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_gradient_abs() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardAbs( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, -4, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 2, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + 
tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 4, Tightening::UB ), + + Tightening( 6, -5, Tightening::LB ), Tightening( 6, 4, Tightening::UB ), + Tightening( 7, -4, Tightening::LB ), Tightening( 7, 10, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 10, Tightening::UB ), + + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 2, Tightening::LB ), Tightening( 11, 27, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_gradient_abs2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardAbs2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 9, Tightening::UB ), + Tightening( 12, -2, Tightening::LB 
), Tightening( 12, 12, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), Tightening( 14, 9, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), + Tightening( 16, 0, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), + + Tightening( 17, -15, Tightening::LB ), Tightening( 17, 12, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 27, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 15, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), + + Tightening( 21, -28, Tightening::LB ), Tightening( 21, 14, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 5, Tightening::UB 
), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 15, Tightening::UB ), + + Tightening( 11, -13, Tightening::LB ), Tightening( 11, 18, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 25, Tightening::UB ), + Tightening( 13, -7, Tightening::LB ), Tightening( 13, 15, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), Tightening( 14, 18, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), Tightening( 15, 25, Tightening::UB ), + Tightening( 16, 0, Tightening::LB ), Tightening( 16, 15, Tightening::UB ), + + Tightening( 17, -33, Tightening::LB ), Tightening( 17, 25, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 58, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 58, Tightening::UB ), + + Tightening( 21, -59, Tightening::LB ), Tightening( 21, 32, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_gradient_round() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardRound( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -4, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 6.5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 11, -4, Tightening::LB ), + Tightening( 11, 6, 
Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -3, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, -10.5, Tightening::LB ), Tightening( 7, 5.5, Tightening::UB ), + + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -10, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + + Tightening( 10, -3, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + Tightening( 11, -15.5, Tightening::LB ), Tightening( 11, 11.5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, -10, Tightening::LB ), + Tightening( 7, 5, Tightening::UB ), + + Tightening( 11, -14, Tightening::LB ), + Tightening( 11, 11, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_gradient_round2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardRound2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB 
), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 15, Tightening::UB ), + Tightening( 12, -10, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -7, Tightening::LB ), Tightening( 13, 3, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 15, Tightening::UB ), + Tightening( 15, -10, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -7, Tightening::LB ), Tightening( 16, 3, Tightening::UB ), + + Tightening( 17, -27.5, Tightening::LB ), Tightening( 17, 27.5, Tightening::UB ), + Tightening( 18, -16.5, Tightening::LB ), Tightening( 18, 16.5, Tightening::UB ), + + Tightening( 19, -28, Tightening::LB ), Tightening( 19, 28, Tightening::UB ), + Tightening( 20, -17, Tightening::LB ), Tightening( 20, 17, Tightening::UB ), + + Tightening( 21, -38, Tightening::LB ), Tightening( 21, 36, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 
20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -13, Tightening::LB ), Tightening( 11, 30, Tightening::UB ), + Tightening( 12, -23, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), + + Tightening( 14, -13, Tightening::LB ), Tightening( 14, 30, Tightening::UB ), + Tightening( 15, -23, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), + Tightening( 16, -9, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), + + Tightening( 17, -57.5, Tightening::LB ), Tightening( 17, 32.5, Tightening::UB ), + Tightening( 18, -23.5, Tightening::LB ), Tightening( 18, 26.5, Tightening::UB ), + + Tightening( 19, -58, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), + Tightening( 20, -24, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), + + Tightening( 21, -74, Tightening::LB ), Tightening( 21, 44, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_gradient_sign() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), 
Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_gradient_sign2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign2( nlr, tableau ); + + 
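+ // Same skeleton as the preceding PMNR gradient tests. For reference, a
+ // minimal sketch of the updateTableau helper used in all of them, assuming it
+ // simply writes each discovered tightening back as the variable's new bound
+ // (inferred from the call sites; the file's actual definition may differ):
+ //
+ //   void updateTableau( MockTableau &tableau, List<Tightening> &bounds )
+ //   {
+ //       for ( const auto &bound : bounds )
+ //       {
+ //           if ( bound._type == Tightening::LB )
+ //               tableau.setLowerBound( bound._variable, bound._value );
+ //           else
+ //               tableau.setUpperBound( bound._variable, bound._value );
+ //       }
+ //   }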
tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); 
+ tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_gradient_leaky_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, 
Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 4, -0.1, Tightening::LB ), + + Tightening( 8, -0.045, Tightening::LB ), + Tightening( 9, -0.3, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), + Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + + Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), + Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + + Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), + Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + 
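+ // (lpRelaxationPropagation() is assumed to dispatch on the
+ // MILP_SOLVER_BOUND_TIGHTENING_TYPE option set at the top of this test, i.e.
+ // to run backward PMNR with the gradient-based neuron-selection heuristic.)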
TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 4, -0.5, Tightening::LB ), + Tightening( 5, -0.4, Tightening::LB ), + + Tightening( 8, -0.4571, Tightening::LB ), + Tightening( 9, -1.1057, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_gradient_leaky_relu2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyRelu2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ), + Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ), + Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ), + Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ), + Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ), + + Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ), + Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ), + + Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ), + Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ), + + Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 7, -0.2, Tightening::LB ), + Tightening( 8, -0.3, Tightening::LB ), + Tightening( 9, -0.3, Tightening::LB ), + Tightening( 10, -0.6, Tightening::LB ), + + Tightening( 14, -0.9, Tightening::LB ), + Tightening( 15, -0.89, Tightening::LB ), + Tightening( 16, -0.77, Tightening::LB ), + + Tightening( 19, -2.3133, Tightening::LB ), + Tightening( 20, -1.2, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); 
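+ // Compare against expectedBounds2 above: the gradient heuristic should report
+ // only leaky-ReLU lower-bound tightenings here.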
+ TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ), + Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ), + Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ), + Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ), + Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ), + + Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ), + Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ), + + Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ), + Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ), + + Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( 
nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
+
+
+ // Invoke PMNR with gradient heuristic for neuron selection
+ TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+ List<Tightening> expectedBounds4( {
+ Tightening( 7, -0.2, Tightening::LB ),
+ Tightening( 8, -0.5, Tightening::LB ),
+ Tightening( 9, -0.6, Tightening::LB ),
+ Tightening( 10, -1.5, Tightening::LB ),
+
+ Tightening( 14, -1.1, Tightening::LB ),
+ Tightening( 15, -2.1771, Tightening::LB ),
+ Tightening( 16, -1.15, Tightening::LB ),
+
+ Tightening( 19, -5.6259, Tightening::LB ),
+ Tightening( 20, -1.9, Tightening::LB ),
+ } );
+
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+ }
+
+ void test_pmnr_gradient_softmax_and_max()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+ Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" );
+ Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+ "backward-pmnr-gradient" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkBackwardSoftmaxAndMax( nlr, tableau );
+
+ tableau.setLowerBound( 0, 0 );
+ tableau.setUpperBound( 0, 1 );
+ tableau.setLowerBound( 1, 0 );
+ tableau.setUpperBound( 1, 1 );
+
+
+ // Invoke DeepPoly
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+ List<Tightening> expectedBounds( {
+ Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+ Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
+ Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
+
+ Tightening( 3, 0.2119, Tightening::LB ), Tightening( 3, 0.8756, Tightening::UB ),
+ Tightening( 5, 0.0049, Tightening::LB ), Tightening( 5, 0.6652, Tightening::UB ),
+ Tightening( 7, 0.0634, Tightening::LB ), Tightening( 7, 0.4955, Tightening::UB ),
+
+ Tightening( 8, -0.1870, Tightening::LB ), Tightening( 8, 1.4488, Tightening::UB ),
+ Tightening( 9, 0.6814, Tightening::LB ), Tightening( 9, 2.2432, Tightening::UB ),
+
+ Tightening( 10, 0.6814, Tightening::LB ), Tightening( 10, 2.2432, Tightening::UB ),
+
+ Tightening( 11, -2.2432, Tightening::LB ), Tightening( 11, -0.6814, Tightening::UB ),
+ } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+
+ // Invoke PMNR with gradient heuristic for neuron selection
+ TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+ List<Tightening> expectedBounds2( {} );
+
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+
+ // Change the current bounds
+ tableau.setLowerBound( 0, -3 );
+ tableau.setUpperBound( 0, 1 );
+ tableau.setLowerBound( 1, -1 );
+ tableau.setUpperBound( 1, 2 );
+
+ double large = 1000000;
+ tableau.setLowerBound( 2, -large );
+ tableau.setUpperBound( 2, large );
+ tableau.setLowerBound( 3, -large );
+ tableau.setUpperBound( 3, large );
+ tableau.setLowerBound( 4, -large );
+ tableau.setUpperBound( 4, large );
+ tableau.setLowerBound( 5, -large );
+ tableau.setUpperBound( 5, large );
+ tableau.setLowerBound( 6, -large );
+
tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + + Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 9, 0.2194, Tightening::LB ), Tightening( 9, 2.9934, Tightening::UB ), + + Tightening( 10, 0.2194, Tightening::LB ), Tightening( 10, 2.9934, Tightening::UB ), + + Tightening( 11, -2.9934, Tightening::LB ), Tightening( 11, -0.2194, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_gradient_softmax_and_max2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSoftmaxAndMax2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0.0003, Tightening::LB ), Tightening( 7, 0.9864, Tightening::UB ), + Tightening( 8, 0.0001, Tightening::LB ), Tightening( 8, 0.9907, Tightening::UB ), + Tightening( 9, 0.0001, Tightening::LB ), Tightening( 9, 0.9907, Tightening::UB ), + Tightening( 10, 0.0001, Tightening::LB ), Tightening( 10, 0.9994, Tightening::UB ), + + Tightening( 11, 1.0013, Tightening::LB ), Tightening( 11, 4.9634, Tightening::UB ), + Tightening( 12, -0.9861, Tightening::LB ), Tightening( 12, 
2.9152, Tightening::UB ), + Tightening( 13, -2.9868, Tightening::LB ), Tightening( 13, 0.9678, Tightening::UB ), + + Tightening( 14, 0.1143, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ), + Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8694, Tightening::UB ), + Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4596, Tightening::UB ), + + Tightening( 17, 0.1143, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0.0001, Tightening::LB ), Tightening( 7, 0.9999, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 0.9991, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 0.9990, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9999, Tightening::UB ), + + Tightening( 11, 1.0003, Tightening::LB ), Tightening( 11, 4.9326, Tightening::UB ), + Tightening( 12, -0.9999, Tightening::LB ), Tightening( 12, 2.9979, Tightening::UB ), + Tightening( 13, -2.9990, Tightening::LB ), Tightening( 13, 0.9980, Tightening::UB ), + + Tightening( 14, 0.1067, Tightening::LB ), Tightening( 14, 0.9970, 
Tightening::UB ),
+ Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8786, Tightening::UB ),
+ Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4677, Tightening::UB ),
+
+ Tightening( 17, 0.1067, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ),
+ } );
+
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
+
+
+ // Invoke PMNR with gradient heuristic for neuron selection
+ TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+ List<Tightening> expectedBounds4( {} );
+
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+ }
+
+ void test_pmnr_gradient_relu_and_bilinear()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+ Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+ "backward-pmnr-gradient" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkBackwardReluAndBilinear( nlr, tableau );
+
+ tableau.setLowerBound( 0, 0 );
+ tableau.setUpperBound( 0, 1 );
+ tableau.setLowerBound( 1, 0 );
+ tableau.setUpperBound( 1, 1 );
+
+
+ // Invoke DeepPoly
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+ List<Tightening> expectedBounds( {
+ Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+ Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
+ Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
+
+ Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+ Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+ Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
+
+ Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ),
+ Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ),
+
+ Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ),
+
+ Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ),
+ } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+
+ // Invoke PMNR with gradient heuristic for neuron selection
+ TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+ List<Tightening> expectedBounds2( {} );
+
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+
+ // Change the current bounds
+ tableau.setLowerBound( 0, -3 );
+ tableau.setUpperBound( 0, 1 );
+ tableau.setLowerBound( 1, -1 );
+ tableau.setUpperBound( 1, 2 );
+
+ double large = 1000000;
+ tableau.setLowerBound( 2, -large );
+ tableau.setUpperBound( 2, large );
+ tableau.setLowerBound( 3, -large );
+ tableau.setUpperBound( 3, large );
+ tableau.setLowerBound( 4, -large );
+ tableau.setUpperBound( 4, large );
+ tableau.setLowerBound( 5, -large );
+ tableau.setUpperBound( 5, large );
+ tableau.setLowerBound( 6, -large );
+ tableau.setUpperBound( 6, large );
+ tableau.setLowerBound( 7, -large );
+ tableau.setUpperBound( 7, large );
+ tableau.setLowerBound( 8, -large );
+ tableau.setUpperBound( 8, large );
+ tableau.setLowerBound( 9, -large );
+ tableau.setUpperBound( 9,
large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), + Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), + + Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), + + Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_gradient_relu_and_bilinear2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReluAndBilinear2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ), + Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ), + + Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, 
expectedBounds ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ), + Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ), + + Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with gradient heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( 
bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_bbps_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ), + Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 8, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, 
Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ), + + Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 8, 0, Tightening::LB ), + Tightening( 9, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_bbps_relu2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ), + + Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ), + Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ), + + Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ), + + Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ), + + Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( 
nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + + Tightening( 19, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ), + Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ), + Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ), + + Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ), + Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, 
Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ), + Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ), + + Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + + Tightening( 20, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_bbps_sigmoid() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSigmoid( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0.2689, Tightening::LB ), Tightening( 4, 0.7311, Tightening::UB ), + Tightening( 5, 0.5, Tightening::LB ), Tightening( 5, 0.8808, Tightening::UB ), + + Tightening( 6, -0.1261, Tightening::LB ), Tightening( 6, 0.5069, Tightening::UB ), + Tightening( 7, -0.2379, Tightening::LB ), Tightening( 7, 0.8571, Tightening::UB ), + + Tightening( 8, 0.4685, Tightening::LB ), Tightening( 8, 0.6241, Tightening::UB ), + Tightening( 9, 0.4408, Tightening::LB ), Tightening( 9, 0.7021, Tightening::UB ), + + Tightening( 10, -1.1819, Tightening::LB ), Tightening( 10, -1.0535, Tightening::UB ), + Tightening( 11, 3.4986, Tightening::LB ), Tightening( 11, 3.8797, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); 
+ tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0.0066, Tightening::LB ), Tightening( 4, 0.8807, Tightening::UB ), + Tightening( 5, 0.0179, Tightening::LB ), Tightening( 5, 0.9526, Tightening::UB ), + + Tightening( 6, -0.8362, Tightening::LB ), Tightening( 6, 0.9193, Tightening::UB ), + Tightening( 7, -0.8860, Tightening::LB ), Tightening( 7, 1.6904, Tightening::UB ), + + Tightening( 8, 0.3023, Tightening::LB ), Tightening( 8, 0.7148, Tightening::UB ), + Tightening( 9, 0.2919, Tightening::LB ), Tightening( 9, 0.8443, Tightening::UB ), + + Tightening( 10, -1.2694, Tightening::LB ), Tightening( 10, -0.8841, Tightening::UB ), + Tightening( 11, 3.2396, Tightening::LB ), Tightening( 11, 4.05, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_bbps_sigmoid2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSigmoid2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), + Tightening( 8, 0.0474, Tightening::LB ), Tightening( 8, 0.9526, Tightening::UB ), + Tightening( 9, 0.0474, Tightening::LB ), Tightening( 9, 0.9526, Tightening::UB ), + Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9975, Tightening::UB ), + + Tightening( 11, 1.3787, Tightening::LB ), Tightening( 11, 4.6213, Tightening::UB ), + Tightening( 12, -0.5636, Tightening::LB ), Tightening( 12, 2.5636, Tightening::UB ), + Tightening( 
13, -2.3771, Tightening::LB ), Tightening( 13, 0.3771, Tightening::UB ), + + Tightening( 14, 0.7988, Tightening::LB ), Tightening( 14, 0.9903, Tightening::UB ), + Tightening( 15, 0.3627, Tightening::LB ), Tightening( 15, 0.9285, Tightening::UB ), + Tightening( 16, 0.0849, Tightening::LB ), Tightening( 16, 0.5932, Tightening::UB ), + + Tightening( 17, -1.2113, Tightening::LB ), Tightening( 17, 0.0354, Tightening::UB ), + Tightening( 18, 1.4027, Tightening::LB ), Tightening( 18, 2.4177, Tightening::UB ), + + Tightening( 19, 0.2295, Tightening::LB ), Tightening( 19, 0.5088, Tightening::UB ), + Tightening( 20, 0.8026, Tightening::LB ), Tightening( 20, 0.9182, Tightening::UB ), + + Tightening( 21, -1.6539, Tightening::LB ), Tightening( 21, -1.3393, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0.1192, Tightening::LB ), 
Tightening( 7, 0.9933, Tightening::UB ), + Tightening( 8, 0.0067, Tightening::LB ), Tightening( 8, 0.9933, Tightening::UB ), + Tightening( 9, 0.0025, Tightening::LB ), Tightening( 9, 0.9933, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9991, Tightening::UB ), + + Tightening( 11, 1.2517, Tightening::LB ), Tightening( 11, 4.9701, Tightening::UB ), + Tightening( 12, -0.9599, Tightening::LB ), Tightening( 12, 2.8466, Tightening::UB ), + Tightening( 13, -2.8147, Tightening::LB ), Tightening( 13, 0.9188, Tightening::UB ), + + Tightening( 14, 0.7776, Tightening::LB ), Tightening( 14, 0.9931, Tightening::UB ), + Tightening( 15, 0.2769, Tightening::LB ), Tightening( 15, 0.9451, Tightening::UB ), + Tightening( 16, 0.0565, Tightening::LB ), Tightening( 16, 0.7148, Tightening::UB ), + + Tightening( 17, -1.4307, Tightening::LB ), Tightening( 17, 0.1083, Tightening::UB ), + Tightening( 18, 1.2592, Tightening::LB ), Tightening( 18, 2.5519, Tightening::UB ), + + Tightening( 19, 0.1929, Tightening::LB ), Tightening( 19, 0.5270, Tightening::UB ), + Tightening( 20, 0.7789, Tightening::LB ), Tightening( 20, 0.9277, Tightening::UB ), + + Tightening( 21, -1.6967, Tightening::LB ), Tightening( 21, -1.3115, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_bbps_abs() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardAbs( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, -4, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 2, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( 
nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 4, Tightening::UB ), + + Tightening( 6, -5, Tightening::LB ), Tightening( 6, 4, Tightening::UB ), + Tightening( 7, -4, Tightening::LB ), Tightening( 7, 10, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 10, Tightening::UB ), + + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 2, Tightening::LB ), Tightening( 11, 27, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_bbps_abs2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardAbs2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), 
+ Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 9, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), Tightening( 14, 9, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), + Tightening( 16, 0, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), + + Tightening( 17, -15, Tightening::LB ), Tightening( 17, 12, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 27, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 15, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), + + Tightening( 21, -28, Tightening::LB ), Tightening( 21, 14, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( 
nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 15, Tightening::UB ), + + Tightening( 11, -13, Tightening::LB ), Tightening( 11, 18, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 25, Tightening::UB ), + Tightening( 13, -7, Tightening::LB ), Tightening( 13, 15, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), Tightening( 14, 18, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), Tightening( 15, 25, Tightening::UB ), + Tightening( 16, 0, Tightening::LB ), Tightening( 16, 15, Tightening::UB ), + + Tightening( 17, -33, Tightening::LB ), Tightening( 17, 25, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 58, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 58, Tightening::UB ), + + Tightening( 21, -59, Tightening::LB ), Tightening( 21, 32, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_bbps_round() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardRound( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -4, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 6.5, Tightening::UB ), + } ); + + List 
bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 11, -4, Tightening::LB ), + Tightening( 11, 6, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -3, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, -10.5, Tightening::LB ), Tightening( 7, 5.5, Tightening::UB ), + + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -10, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + + Tightening( 10, -3, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + Tightening( 11, -15.5, Tightening::LB ), Tightening( 11, 11.5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, -10, Tightening::LB ), + Tightening( 7, 5, Tightening::UB ), + + Tightening( 11, -14, Tightening::LB ), + Tightening( 11, 11, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_bbps_round2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardRound2( nlr, 
tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 15, Tightening::UB ), + Tightening( 12, -10, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -7, Tightening::LB ), Tightening( 13, 3, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 15, Tightening::UB ), + Tightening( 15, -10, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -7, Tightening::LB ), Tightening( 16, 3, Tightening::UB ), + + Tightening( 17, -27.5, Tightening::LB ), Tightening( 17, 27.5, Tightening::UB ), + Tightening( 18, -16.5, Tightening::LB ), Tightening( 18, 16.5, Tightening::UB ), + + Tightening( 19, -28, Tightening::LB ), Tightening( 19, 28, Tightening::UB ), + Tightening( 20, -17, Tightening::LB ), Tightening( 20, 17, Tightening::UB ), + + Tightening( 21, -38, Tightening::LB ), Tightening( 21, 36, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); 
+ tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -13, Tightening::LB ), Tightening( 11, 30, Tightening::UB ), + Tightening( 12, -23, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), + + Tightening( 14, -13, Tightening::LB ), Tightening( 14, 30, Tightening::UB ), + Tightening( 15, -23, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), + Tightening( 16, -9, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), + + Tightening( 17, -57.5, Tightening::LB ), Tightening( 17, 32.5, Tightening::UB ), + Tightening( 18, -23.5, Tightening::LB ), Tightening( 18, 26.5, Tightening::UB ), + + Tightening( 19, -58, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), + Tightening( 20, -24, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), + + Tightening( 21, -74, Tightening::LB ), Tightening( 21, 44, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_bbps_sign() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, 
Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( 
boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_bbps_sign2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 
9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_bbps_leaky_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + 
tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 4, -0.1, Tightening::LB ), + + Tightening( 8, -0.045, Tightening::LB ), + Tightening( 9, -0.3, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), + Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + + Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), + Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + + Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), + Tightening( 11, 
-14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 4, -0.5, Tightening::LB ), + Tightening( 5, -0.4, Tightening::LB ), + + Tightening( 8, -0.4571, Tightening::LB ), + Tightening( 9, -1.1057, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_bbps_leaky_relu2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyRelu2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ), + Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ), + Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ), + Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ), + Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ), + + Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ), + Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ), + + Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ), + Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ), + + Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 7, -0.2, Tightening::LB ), + Tightening( 8, -0.3, Tightening::LB ), + 
Tightening( 9, -0.3, Tightening::LB ), + Tightening( 10, -0.6, Tightening::LB ), + + Tightening( 14, -0.9, Tightening::LB ), + Tightening( 15, -0.89, Tightening::LB ), + Tightening( 16, -0.77, Tightening::LB ), + + Tightening( 19, -2.3133, Tightening::LB ), + Tightening( 20, -1.2, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ), + Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ), + Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ), + Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ), + Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ), + + Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ), + Tightening( 18, -19, 
Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ),
+
+            Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ),
+            Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ),
+
+            Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
+
+
+        // Invoke PMNR with BBPS heuristic for neuron selection
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds4( {
+            Tightening( 7, -0.2, Tightening::LB ),
+            Tightening( 8, -0.5, Tightening::LB ),
+            Tightening( 9, -0.6, Tightening::LB ),
+            Tightening( 10, -1.5, Tightening::LB ),
+
+            Tightening( 14, -1.1, Tightening::LB ),
+            Tightening( 15, -2.1771, Tightening::LB ),
+            Tightening( 16, -1.15, Tightening::LB ),
+
+            Tightening( 19, -5.6259, Tightening::LB ),
+            Tightening( 20, -1.9, Tightening::LB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+    }
+
+    void test_pmnr_bbps_softmax_and_max()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+                                   "backward-pmnr-bbps" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkBackwardSoftmaxAndMax( nlr, tableau );
+
+        tableau.setLowerBound( 0, 0 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, 0 );
+        tableau.setUpperBound( 1, 1 );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
+            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
+
+            Tightening( 3, 0.2119, Tightening::LB ), Tightening( 3, 0.8756, Tightening::UB ),
+            Tightening( 5, 0.0049, Tightening::LB ), Tightening( 5, 0.6652, Tightening::UB ),
+            Tightening( 7, 0.0634, Tightening::LB ), Tightening( 7, 0.4955, Tightening::UB ),
+
+            Tightening( 8, -0.1870, Tightening::LB ), Tightening( 8, 1.4488, Tightening::UB ),
+            Tightening( 9, 0.6814, Tightening::LB ), Tightening( 9, 2.2432, Tightening::UB ),
+
+            Tightening( 10, 0.6814, Tightening::LB ), Tightening( 10, 2.2432, Tightening::UB ),
+
+            Tightening( 11, -2.2432, Tightening::LB ), Tightening( 11, -0.6814, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+
+        // Invoke PMNR with BBPS heuristic for neuron selection
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds2( {} );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+
+        // Change the current bounds
+        tableau.setLowerBound( 0, -3 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 2 );
+
+        double large =
1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + + Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 9, 0.2194, Tightening::LB ), Tightening( 9, 2.9934, Tightening::UB ), + + Tightening( 10, 0.2194, Tightening::LB ), Tightening( 10, 2.9934, Tightening::UB ), + + Tightening( 11, -2.9934, Tightening::LB ), Tightening( 11, -0.2194, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_bbps_softmax_and_max2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSoftmaxAndMax2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0.0003, Tightening::LB ), Tightening( 7, 0.9864, Tightening::UB ), + Tightening( 8, 0.0001, Tightening::LB ), Tightening( 8, 0.9907, 
Tightening::UB ), + Tightening( 9, 0.0001, Tightening::LB ), Tightening( 9, 0.9907, Tightening::UB ), + Tightening( 10, 0.0001, Tightening::LB ), Tightening( 10, 0.9994, Tightening::UB ), + + Tightening( 11, 1.0013, Tightening::LB ), Tightening( 11, 4.9634, Tightening::UB ), + Tightening( 12, -0.9861, Tightening::LB ), Tightening( 12, 2.9152, Tightening::UB ), + Tightening( 13, -2.9868, Tightening::LB ), Tightening( 13, 0.9678, Tightening::UB ), + + Tightening( 14, 0.1143, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ), + Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8694, Tightening::UB ), + Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4596, Tightening::UB ), + + Tightening( 17, 0.1143, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0.0001, Tightening::LB ), Tightening( 7, 0.9999, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 0.9991, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 0.9990, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9999, Tightening::UB ), + + 
Tightening( 11, 1.0003, Tightening::LB ), Tightening( 11, 4.9326, Tightening::UB ),
+            Tightening( 12, -0.9999, Tightening::LB ), Tightening( 12, 2.9979, Tightening::UB ),
+            Tightening( 13, -2.9990, Tightening::LB ), Tightening( 13, 0.9980, Tightening::UB ),
+
+            Tightening( 14, 0.1067, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ),
+            Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8786, Tightening::UB ),
+            Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4677, Tightening::UB ),
+
+            Tightening( 17, 0.1067, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
+
+
+        // Invoke PMNR with BBPS heuristic for neuron selection
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds4( {} );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+    }
+
+    void test_pmnr_bbps_relu_and_bilinear()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+                                   "backward-pmnr-bbps" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkBackwardReluAndBilinear( nlr, tableau );
+
+        tableau.setLowerBound( 0, 0 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, 0 );
+        tableau.setUpperBound( 1, 1 );
+
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
+            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
+
+            Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
+
+            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ),
+            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ),
+
+            Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ),
+
+            Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+
+        // Invoke PMNR with BBPS heuristic for neuron selection
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds2( {} );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+
+        // Change the current bounds
+        tableau.setLowerBound( 0, -3 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 2 );
+
+        double large = 1000000;
+        tableau.setLowerBound( 2, -large );
+        tableau.setUpperBound( 2, large );
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+
tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), + Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), + + Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), + + Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_pmnr_bbps_relu_and_bilinear2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReluAndBilinear2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + + Tightening( 13, -4, Tightening::LB ), 
Tightening( 13, 8, Tightening::UB ), + Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ), + + Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ), + Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ), + + Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PMNR with BBPS heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + 
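// Note: updateTableau() above writes the tightenings just collected from DeepPoly back
+        // into the mock tableau, and obtainCurrentBounds() below reloads them into the NLR,
+        // so the LP-based PMNR pass starts from the already-tightened variable bounds.
+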
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + bool boundsEqual( const List &bounds, const List &expectedBounds ) + { + if ( bounds.size() < expectedBounds.size() ) + return false; + + bool allFound = true; + for ( const auto &bound : bounds ) + { + bool currentFound = false; + for ( const auto &expectedBound : expectedBounds ) + { + currentFound |= + ( bound._type == expectedBound._type && + bound._variable == expectedBound._variable && + FloatUtils::areEqual( bound._value, expectedBound._value, 0.0001 ) ); + } + allFound &= currentFound; + } + return allFound; + } + + // Create list of all tightenings in newBounds for which there is no bound in newBounds or in + // bounds which is at least as tight. + List removeRedundancies( const List &bounds, + const List &newBounds ) + { + List minimalBounds; + + for ( const auto &newBound : newBounds ) + { + bool foundTighter = false; + for ( const auto &bound : bounds ) + { + foundTighter |= + ( newBound._type == bound._type && newBound._variable == bound._variable && + ( ( newBound._type == Tightening::LB && + FloatUtils::lte( newBound._value, bound._value, 0.0001 ) ) || + ( newBound._type == Tightening::UB && + FloatUtils::gte( newBound._value, bound._value, 0.0001 ) ) ) ); + } + + for ( const auto &bound : newBounds ) + { + foundTighter |= + ( newBound._type == bound._type && newBound._variable == bound._variable && + ( ( newBound._type == Tightening::LB && + FloatUtils::lt( newBound._value, bound._value, 0.0001 ) ) || + ( newBound._type == Tightening::UB && + FloatUtils::gt( newBound._value, bound._value, 0.0001 ) ) ) ); + } + + if ( !foundTighter ) + minimalBounds.append( newBound ); + } + return minimalBounds; + } + + void updateTableau( MockTableau &tableau, List &tightenings ) + { + for ( const auto &tightening : tightenings ) + { + if ( tightening._type == Tightening::LB ) + { + tableau.setLowerBound( tightening._variable, tightening._value ); + } + + if ( tightening._type == Tightening::UB ) + { + tableau.setUpperBound( tightening._variable, tightening._value ); + } + } + } +}; diff --git a/src/nlr/tests/Test_PMNRSelection.h b/src/nlr/tests/Test_PMNRSelection.h index d771680107..9b877270c1 100644 --- a/src/nlr/tests/Test_PMNRSelection.h +++ b/src/nlr/tests/Test_PMNRSelection.h @@ -1012,37 +1012,52 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Using x2 = 2x0 + 3x1, x3 = x0 + x1: x0 + 2x1 <= x6 <= x0 + 2x1 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 1, 0, 0, 1 } ), - Vector( { 1, 0, 0, 1 } ), - Vector( { 0, 0 } ), - Vector( { 0, 0 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 1 } ), - Vector( { 1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 1, -1 } ), - Vector( { 1, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 1, -1 } ), - Vector( { 1, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { 1, 2 } ), - Vector( { 1, 2 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + 
Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 1, 2 } ), + Vector( { 1, 2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + { + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-random" ); + comparePMNRScores( nlr, Map( {} ) ); + } + { + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + comparePMNRScores( nlr, Map( {} ) ); + } + { + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); + comparePMNRScores( nlr, Map( {} ) ); + } } void test_symbolic_bound_maps_relus_active_and_inactive() @@ -1144,37 +1159,37 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Using x3 = x0 + x1: -x0 - x1 <= x6 <= -x0 - x1 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 0, 0, 0, 1 } ), - Vector( { 0, 0, 0, 1 } ), - Vector( { 0, 0 } ), - Vector( { 0, 0 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 1 } ), - Vector( { 1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 1, -1 } ), - Vector( { 1, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 0, -1 } ), - Vector( { 0, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { -1, -1 } ), - Vector( { -1, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); + Vector( { 0, 0, 0, 1 } ), + Vector( { 0, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, -1 } ), + Vector( { 0, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { -1, -1 } ), + Vector( { -1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); } void test_symbolic_bound_maps_relus_active_and_not_fixed() @@ -1280,37 +1295,37 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Using x2 = 2x0 + 3x1, x3 = x0 + x1: x0 + 2x1 - 15 <= x6 <= 0.5x0 + 1.25x1 - 8.25 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 1, 0, 0, 1 } ), - Vector( { 0.75, 0, 0, 1 } ), - Vector( { 0, 0 } ), - Vector( { 3, 0 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 1 } ), - Vector( { 1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 1, -1 } ), - Vector( { 1, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 1, -1 } ), - Vector( { 0.75, -1 } ), - Vector( { 0 } ), - Vector( { 3 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { 1, 2 } ), - Vector( { 0.5, 1.25 } ), - Vector( { -15 } ), - Vector( { -8.25 } ) ); + Vector( { 1, 0, 0, 1 } ), + Vector( { 0.75, 0, 0, 1 } ), + Vector( { 0, 0 } ), + 
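+        // For the unfixed ReLU, the 0.75 upper slope with offset 3 matches
+        // the DeepPoly chord y <= ( u / ( u - l ) ) * ( x - l ); it is
+        // consistent with source bounds l = -4, u = 12: 12 / 16 = 0.75 and
+        // -( -4 ) * 0.75 = 3.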
Vector( { 3, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 1, -1 } ), + Vector( { 0.75, -1 } ), + Vector( { 0 } ), + Vector( { 3 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 1, 2 } ), + Vector( { 0.5, 1.25 } ), + Vector( { -15 } ), + Vector( { -8.25 } ) ); } void test_symbolic_bound_maps_relus_active_and_externally_fixed() @@ -1414,37 +1429,37 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Using x3 = x0 + x1: -x0 - x1 <= x6 <= -x0 - x1 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 0, 0, 0, 1 } ), - Vector( { 0, 0, 0, 1 } ), - Vector( { 0, 0 } ), - Vector( { 0, 0 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 1 } ), - Vector( { 1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 1, -1 } ), - Vector( { 1, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 0, -1 } ), - Vector( { 0, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { -1, -1 } ), - Vector( { -1, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); + Vector( { 0, 0, 0, 1 } ), + Vector( { 0, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, -1 } ), + Vector( { 0, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { -1, -1 } ), + Vector( { -1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); } void test_symbolic_bound_maps_relu_residual1() @@ -1560,55 +1575,55 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Using x1 = x0: -1.5x0 + 2.5 <= x5 <= x0 + 5 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 0 } ), - Vector( { 0.5 } ), - Vector( { 0 } ), - Vector( { 0.5 } ) ); - comparePredecessorLayerSymbolicBounds( nlr, - 4, - Vector( { 1 } ), - Vector( { 0.6667 } ), - Vector( { 0 } ), - Vector( { 0.6667 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 5, - Vector( { 1 } ), - Vector( { 1 } ), + comparePredecessorSymbolicBounds( nlr, + 2, Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + Vector( { 0.5 } ), + Vector( { 0 } ), + Vector( { 0.5 } ) ); + comparePredecessorSymbolicBounds( nlr, 4, - Vector( { 3 } ), - Vector( { 3 } ), - Vector( { -2 } ), - Vector( { 4 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 3 } ), - Vector( { 2 } ), - Vector( { -2 } ), - Vector( { 6 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 2, - Vector( { -3 } ), - Vector( { -2 } ), - Vector( { -2 } ), - Vector( { 10 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 1.5 } ), - Vector( { 3 } ), - Vector( { -0.5 } ), - Vector( { 7 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { -1.5 } ), Vector( { 1 } ), - Vector( { 2.5 } ), - Vector( { 5 } ) ); + Vector( { 0.6667 } ), + Vector( { 0 } ), + Vector( { 0.6667 } ) ); + + compareOutputSymbolicBounds( nlr, + 5, + Vector( { 1 } 
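+        // The 0.6667 upper slope and offset above are the same chord
+        // construction, consistent with a source range of [ -1, 2 ]:
+        // u / ( u - l ) = 2 / 3 and -l * 2 / 3 = 2 / 3.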
), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 4, + Vector( { 3 } ), + Vector( { 3 } ), + Vector( { -2 } ), + Vector( { 4 } ) ); + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 3 } ), + Vector( { 2 } ), + Vector( { -2 } ), + Vector( { 6 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { -3 } ), + Vector( { -2 } ), + Vector( { -2 } ), + Vector( { 10 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 1.5 } ), + Vector( { 3 } ), + Vector( { -0.5 } ), + Vector( { 7 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { -1.5 } ), + Vector( { 1 } ), + Vector( { 2.5 } ), + Vector( { 5 } ) ); } void test_symbolic_bound_maps_relu_residual2() @@ -1736,61 +1751,61 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Using x1 = x0: -3.5x0 + 2.5 <= x6 <= -x0 + 5 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 0 } ), - Vector( { 0.5 } ), - Vector( { 0 } ), - Vector( { 0.5 } ) ); - comparePredecessorLayerSymbolicBounds( nlr, - 4, - Vector( { 1 } ), - Vector( { 0.6667 } ), - Vector( { 0 } ), - Vector( { 0.6667 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 6, - Vector( { 1 } ), - Vector( { 1 } ), + comparePredecessorSymbolicBounds( nlr, + 2, Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 5, - Vector( { 1 } ), - Vector( { 1 } ), + Vector( { 0.5 } ), Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + Vector( { 0.5 } ) ); + comparePredecessorSymbolicBounds( nlr, 4, - Vector( { 3 } ), - Vector( { 3 } ), - Vector( { 0 } ), - Vector( { 2 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 3 } ), - Vector( { 2 } ), - Vector( { 0 } ), - Vector( { 4 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 2, - Vector( { -3 } ), - Vector( { -2 } ), - Vector( { 2 } ), - Vector( { 6 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { -1.5 } ), + Vector( { 1 } ), + Vector( { 0.6667 } ), Vector( { 0 } ), - Vector( { 0.5 } ), - Vector( { 6 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { -3.5 } ), - Vector( { -1 } ), - Vector( { 2.5 } ), - Vector( { 5 } ) ); + Vector( { 0.6667 } ) ); + + compareOutputSymbolicBounds( nlr, + 6, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 5, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 4, + Vector( { 3 } ), + Vector( { 3 } ), + Vector( { 0 } ), + Vector( { 2 } ) ); + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 3 } ), + Vector( { 2 } ), + Vector( { 0 } ), + Vector( { 4 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { -3 } ), + Vector( { -2 } ), + Vector( { 2 } ), + Vector( { 6 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { -1.5 } ), + Vector( { 0 } ), + Vector( { 0.5 } ), + Vector( { 6 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { -3.5 } ), + Vector( { -1 } ), + Vector( { 2.5 } ), + Vector( { 5 } ) ); } void test_symbolic_bound_maps_relu_reindex() @@ -1944,56 +1959,56 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 1 <= x10 <= x0 + 0.5x1 + 4 0 <= x11 <= 0.25x2 + 0.25x3 + 1.5 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 0, 0, 0, 0 } ), - Vector( { 0, 0.5, 0.5, 0 } ), - Vector( { 0, 0 } ), - Vector( { 1, 1 } ) ); - - comparePredecessorLayerSymbolicBounds( nlr, - 4, - Vector( { 0, 1, 0, 0 } ), - Vector( { 0, 1, 0.5, 0 } ), - Vector( { 0, 0 } ), - Vector( { 
1, 0 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 5, - Vector( { 1, 0, 0, 1 } ), - Vector( { 1, 0, 0, 1 } ), + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0, 0, 0, 0 } ), + Vector( { 0, 0.5, 0.5, 0 } ), Vector( { 0, 0 } ), - Vector( { 0, 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + Vector( { 1, 1 } ) ); + + comparePredecessorSymbolicBounds( nlr, 4, - Vector( { 1, 1, 1, 0 } ), - Vector( { 1, 1, 1, 0 } ), - Vector( { 1, 0 } ), + Vector( { 0, 1, 0, 0 } ), + Vector( { 0, 1, 0.5, 0 } ), + Vector( { 0, 0 } ), Vector( { 1, 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 1, 0, 0, 0 } ), - Vector( { 1, 0, 0.5, 0.5 } ), - Vector( { 1, 0 } ), - Vector( { 2, 1 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 2, - Vector( { 1, 0, 1, 0 } ), - Vector( { 0.5, -0.5, 1.5, 0.5 } ), - Vector( { 1, 0 } ), - Vector( { 2, 1 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 0, 0, 0, 0 } ), - Vector( { 0.75, 0.25, 0.25, 0 } ), - Vector( { 1, 0 } ), - Vector( { 4, 1.5 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { 0, 0, 0, 0 } ), - Vector( { 1, 0.25, 0.5, 0.25 } ), - Vector( { 1, 0 } ), - Vector( { 4, 1.5 } ) ); + + compareOutputSymbolicBounds( nlr, + 5, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 4, + Vector( { 1, 1, 1, 0 } ), + Vector( { 1, 1, 1, 0 } ), + Vector( { 1, 0 } ), + Vector( { 1, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1, 0, 0, 0 } ), + Vector( { 1, 0, 0.5, 0.5 } ), + Vector( { 1, 0 } ), + Vector( { 2, 1 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, 0, 1, 0 } ), + Vector( { 0.5, -0.5, 1.5, 0.5 } ), + Vector( { 1, 0 } ), + Vector( { 2, 1 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, 0, 0, 0 } ), + Vector( { 0.75, 0.25, 0.25, 0 } ), + Vector( { 1, 0 } ), + Vector( { 4, 1.5 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0, 0, 0, 0 } ), + Vector( { 1, 0.25, 0.5, 0.25 } ), + Vector( { 1, 0 } ), + Vector( { 4, 1.5 } ) ); } void test_symbolic_bound_maps_abs_all_positive() @@ -2138,37 +2153,37 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Using x2 = 2x0 + 3x1, x3 = x0 + x1: x0 + 2x1 <= x6 <= x0 + 2x1 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 1, 0, 0, 1 } ), - Vector( { 1, 0, 0, 1 } ), - Vector( { 0, 0 } ), - Vector( { 0, 0 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 1 } ), - Vector( { 1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 1, -1 } ), - Vector( { 1, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 1, -1 } ), - Vector( { 1, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { 1, 2 } ), - Vector( { 1, 2 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 1, 2 } ), + Vector( { 1, 2 } 
), + Vector( { 0 } ), + Vector( { 0 } ) ); } void test_symbolic_bound_maps_abs_positive_and_negative() @@ -2316,37 +2331,37 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Using x2 = 2x0 + 3x1 - 30, x3 = x0 + x1: -3x0 - 4x1 + 30 <= x6 <= -3x0 - 4x1 + 30 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { -1, 0, 0, 1 } ), - Vector( { -1, 0, 0, 1 } ), - Vector( { 0, 0 } ), - Vector( { 0, 0 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 1 } ), - Vector( { 1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 1, -1 } ), - Vector( { 1, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { -1, -1 } ), - Vector( { -1, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { -3, -4 } ), - Vector( { -3, -4 } ), - Vector( { 30 } ), - Vector( { 30 } ) ); + Vector( { -1, 0, 0, 1 } ), + Vector( { -1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { -1, -1 } ), + Vector( { -1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { -3, -4 } ), + Vector( { -3, -4 } ), + Vector( { 30 } ), + Vector( { 30 } ) ); } void test_symbolic_bound_maps_absolute_values_positive_and_not_fixed() @@ -2498,37 +2513,37 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Using x3 = x0 + x1: -x0 - x1 <= x6 <= -x0 - x1 + 12 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 0, 0, 0, 1 } ), - Vector( { 0, 0, 0, 1 } ), - Vector( { 0, 0 } ), - Vector( { 12, 0 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 1 } ), - Vector( { 1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 1, -1 } ), - Vector( { 1, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 0, -1 } ), - Vector( { 0, -1 } ), - Vector( { 0 } ), - Vector( { 12 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { -1, -1 } ), - Vector( { -1, -1 } ), - Vector( { 0 } ), - Vector( { 12 } ) ); + Vector( { 0, 0, 0, 1 } ), + Vector( { 0, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 12, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, -1 } ), + Vector( { 0, -1 } ), + Vector( { 0 } ), + Vector( { 12 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { -1, -1 } ), + Vector( { -1, -1 } ), + Vector( { 0 } ), + Vector( { 12 } ) ); } void test_symbolic_bound_maps_absolute_values_active_and_externally_fixed() @@ -2682,37 +2697,37 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Using x3 = x0 + x1: - x0 - x1 + 3 <= x6 <= - x0 - x1 + 3 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { -1, 0, 0, 1 } ), - Vector( { -1, 0, 0, 1 } ), - Vector( { 0, 0 } ), - Vector( { 0, 0 } ) ); - - 
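+        // With the absolute value fixed in its negative phase the relaxation
+        // is exact, y = -x, which is why the lower and upper expected vectors
+        // coincide (note the -1 leading coefficients above).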
compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 1 } ), - Vector( { 1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 1, -1 } ), - Vector( { 1, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 0, -1 } ), - Vector( { 0, -1 } ), - Vector( { 3 } ), - Vector( { 3 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { -1, -1 } ), - Vector( { -1, -1 } ), - Vector( { 3 } ), - Vector( { 3 } ) ); + Vector( { -1, 0, 0, 1 } ), + Vector( { -1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, -1 } ), + Vector( { 0, -1 } ), + Vector( { 3 } ), + Vector( { 3 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { -1, -1 } ), + Vector( { -1, -1 } ), + Vector( { 3 } ), + Vector( { 3 } ) ); } void test_symbolic_bound_maps_signs_positive_and_not_fixed() @@ -2868,37 +2883,37 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Using x2 = 2x0 + 3x1 - 15: 1/3 x0 + 1/2 x1 - 4.5 <= x6 <= x0 + 1.5x1 - 7.5 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 0.1667, 0, 0, 0 } ), - Vector( { 0.5, 0, 0, 0 } ), - Vector( { -1, 1 } ), - Vector( { 1, 1 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 1 } ), - Vector( { 1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 1, -1 } ), - Vector( { 1, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 0.1667, 0 } ), - Vector( { 0.5, 0 } ), - Vector( { -2 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { 0.3333, 0.5 } ), - Vector( { 1, 1.5 } ), - Vector( { -4.5 } ), - Vector( { -7.5 } ) ); + Vector( { 0.1667, 0, 0, 0 } ), + Vector( { 0.5, 0, 0, 0 } ), + Vector( { -1, 1 } ), + Vector( { 1, 1 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0.1667, 0 } ), + Vector( { 0.5, 0 } ), + Vector( { -2 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0.3333, 0.5 } ), + Vector( { 1, 1.5 } ), + Vector( { -4.5 } ), + Vector( { -7.5 } ) ); } void test_symbolic_bound_maps_signs_active_and_externally_fixed() @@ -3046,37 +3061,37 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Layer 0: -2 <= x6 <= -2 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 0, 0, 0, 0 } ), - Vector( { 0, 0, 0, 0 } ), - Vector( { -1, 1 } ), - Vector( { -1, 1 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 1 } ), - Vector( { 1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 1, -1 } ), - Vector( { 1, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 0, 0 } ), - Vector( { 0, 0 } ), - Vector( { -2 } ), - Vector( { -2 } ) ); - 
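+        // The 0.1667 / 0.5 slopes for the unfixed sign are consistent with
+        // bounding sign(x) over [ -4, 12 ]: from below by the line through
+        // ( 0, -1 ) and ( u, 1 ), slope 2 / u = 1 / 6, and from above by the
+        // line through ( l, -1 ) and ( 0, 1 ), slope -2 / l = 1 / 2.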
compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { 0, 0 } ), - Vector( { 0, 0 } ), - Vector( { -2 } ), - Vector( { -2 } ) ); + Vector( { 0, 0, 0, 0 } ), + Vector( { 0, 0, 0, 0 } ), + Vector( { -1, 1 } ), + Vector( { -1, 1 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, 0 } ), + Vector( { 0, 0 } ), + Vector( { -2 } ), + Vector( { -2 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0, 0 } ), + Vector( { 0, 0 } ), + Vector( { -2 } ), + Vector( { -2 } ) ); } void test_symbolic_bound_maps_leaky_relu() @@ -3239,56 +3254,56 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 2x0 + 2x1 + 1 <= x10 <= 0.8 x0 + 0.72 x1 + 4.12 0.4x0 + 1.6x1 - 0.8 <= x11 <= -0.24 x0 + 0.96 x1 + 1.6 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 1, 0, 0, 1 } ), - Vector( { 0.6, 0, 0, 0.6 } ), - Vector( { 0, 0 } ), - Vector( { 0.8, 0.8 } ) ); - - comparePredecessorLayerSymbolicBounds( nlr, - 4, - Vector( { 1, 0, 0, 1 } ), - Vector( { 0.6667, 0, 0, 0.6 } ), - Vector( { 0, 0 } ), - Vector( { 0.9333, 1.12 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 5, - Vector( { 1, 0, 0, 1 } ), + comparePredecessorSymbolicBounds( nlr, + 2, Vector( { 1, 0, 0, 1 } ), + Vector( { 0.6, 0, 0, 0.6 } ), Vector( { 0, 0 } ), - Vector( { 0, 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + Vector( { 0.8, 0.8 } ) ); + + comparePredecessorSymbolicBounds( nlr, 4, - Vector( { 1, 0, 1, 1 } ), - Vector( { 1, 0, 1, 1 } ), - Vector( { 1, 0 } ), - Vector( { 1, 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 1, 0, 1, 1 } ), - Vector( { 0.6667, 0, 0.6, 0.6 } ), - Vector( { 1, 0 } ), - Vector( { 3.0533, 1.12 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 2, - Vector( { 2, 1, 0, -1 } ), - Vector( { 1.2667, 0.6, 0.0667, -0.6 } ), - Vector( { 1, 0 } ), - Vector( { 3.0533, 1.12 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 2, 1, 0, -0.6 } ), - Vector( { 0.76, 0.36, 0.04, -0.6 } ), - Vector( { 1, -0.8 } ), - Vector( { 4.12, 1.6 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { 2, 0.4, 2, 1.6 } ), - Vector( { 0.8, -0.24, 0.72, 0.96 } ), - Vector( { 1, -0.8 } ), - Vector( { 4.12, 1.6 } ) ); + Vector( { 1, 0, 0, 1 } ), + Vector( { 0.6667, 0, 0, 0.6 } ), + Vector( { 0, 0 } ), + Vector( { 0.9333, 1.12 } ) ); + + compareOutputSymbolicBounds( nlr, + 5, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 4, + Vector( { 1, 0, 1, 1 } ), + Vector( { 1, 0, 1, 1 } ), + Vector( { 1, 0 } ), + Vector( { 1, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1, 0, 1, 1 } ), + Vector( { 0.6667, 0, 0.6, 0.6 } ), + Vector( { 1, 0 } ), + Vector( { 3.0533, 1.12 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 2, 1, 0, -1 } ), + Vector( { 1.2667, 0.6, 0.0667, -0.6 } ), + Vector( { 1, 0 } ), + Vector( { 3.0533, 1.12 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 2, 1, 0, -0.6 } ), + Vector( { 0.76, 0.36, 0.04, -0.6 } ), + Vector( { 1, -0.8 } ), + Vector( { 4.12, 1.6 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 2, 0.4, 2, 1.6 } ), + Vector( { 0.8, -0.24, 0.72, 0.96 } ), + Vector( { 1, -0.8 } ), + Vector( { 4.12, 1.6 } ) ); } void test_symbolic_bound_maps_sigmoids_and_round() @@ 
-3438,49 +3453,49 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 0.2100 x0 + 0.1584 <= x8 <= 0.2100 x0 + 1.8416 0.2100 x1 - 0.8416 <= x9 <= 0.2100 x1 + 0.8516 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 0.1050, 0, 0, 0.1050 } ), - Vector( { 0.1050, 0, 0, 0.1050 } ), - Vector( { 0.3292, 0.3292 } ), - Vector( { 0.6708, 0.6708 } ) ); - comparePredecessorLayerSymbolicBounds( nlr, - 4, - Vector( { 1, 0, 0, 1 } ), - Vector( { 1, 0, 0, 1 } ), - Vector( { -0.5, -0.5 } ), - Vector( { 0.5, 0.5 } ) ); - - compareOutputLayerSymbolicBounds( nlr, + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0.1050, 0, 0, 0.1050 } ), + Vector( { 0.1050, 0, 0, 0.1050 } ), + Vector( { 0.3292, 0.3292 } ), + Vector( { 0.6708, 0.6708 } ) ); + comparePredecessorSymbolicBounds( nlr, 4, Vector( { 1, 0, 0, 1 } ), Vector( { 1, 0, 0, 1 } ), - Vector( { 0, 0 } ), - Vector( { 0, 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 1, 0, 0, 1 } ), - Vector( { 1, 0, 0, 1 } ), - Vector( { -0.5, -0.5 } ), - Vector( { 0.5, 0.5 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 2, - Vector( { 1, 1, 1, -1 } ), - Vector( { 1, 1, 1, -1 } ), Vector( { -0.5, -0.5 } ), Vector( { 0.5, 0.5 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 0.1050, 0.1050, 0.1050, -0.1050 } ), - Vector( { 0.1050, 0.1050, 0.1050, -0.1050 } ), - Vector( { 0.1584, -0.8416 } ), - Vector( { 1.8416, 0.8416 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { 0.2100, 0, 0, 0.2100 } ), - Vector( { 0.2100, 0, 0, 0.2100 } ), - Vector( { 0.1584, -0.8416 } ), - Vector( { 1.8416, 0.8416 } ) ); + + compareOutputSymbolicBounds( nlr, + 4, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { -0.5, -0.5 } ), + Vector( { 0.5, 0.5 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, 1, 1, -1 } ), + Vector( { 1, 1, 1, -1 } ), + Vector( { -0.5, -0.5 } ), + Vector( { 0.5, 0.5 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0.1050, 0.1050, 0.1050, -0.1050 } ), + Vector( { 0.1050, 0.1050, 0.1050, -0.1050 } ), + Vector( { 0.1584, -0.8416 } ), + Vector( { 1.8416, 0.8416 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0.2100, 0, 0, 0.2100 } ), + Vector( { 0.2100, 0, 0, 0.2100 } ), + Vector( { 0.1584, -0.8416 } ), + Vector( { 1.8416, 0.8416 } ) ); } void test_symbolic_bound_maps_max_not_fixed() @@ -3600,49 +3615,49 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Layer 0: 0 <= x7 <= 6 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 1, 0, 0, 0 } ), - Vector( { 0.6, 0, 0, 0.4 } ), - Vector( { 0, 0 } ), - Vector( { 1.2, 1.2 } ) ); - comparePredecessorLayerSymbolicBounds( nlr, - 3, - Vector( { 0, 1 } ), - Vector( { 0, 0 } ), - Vector( { 0 } ), - Vector( { 3 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 4, - Vector( { 1 } ), - Vector( { 1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 2 } ), - Vector( { 2 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 0, 2 } ), - Vector( { 0, 0 } ), - Vector( { 0 } ), - Vector( { 6 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 0, 0 } ), - Vector( { 0, 0 } ), - Vector( { 0 } ), - Vector( { 6 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, + Vector( { 1, 0, 0, 0 } ), + 
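+        // Max relaxation: the expected lower bound tracks a single argument
+        // of the max exactly, while the upper bound below mixes the sources
+        // through a convex combination ( 0.6 / 0.4 ) plus a constant slack
+        // of 1.2.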
Vector( { 0.6, 0, 0, 0.4 } ), Vector( { 0, 0 } ), + Vector( { 1.2, 1.2 } ) ); + comparePredecessorSymbolicBounds( nlr, + 3, + Vector( { 0, 1 } ), Vector( { 0, 0 } ), Vector( { 0 } ), - Vector( { 6 } ) ); + Vector( { 3 } ) ); + + compareOutputSymbolicBounds( nlr, + 4, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 2 } ), + Vector( { 2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 0, 2 } ), + Vector( { 0, 0 } ), + Vector( { 0 } ), + Vector( { 6 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, 0 } ), + Vector( { 0, 0 } ), + Vector( { 0 } ), + Vector( { 6 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0, 0 } ), + Vector( { 0, 0 } ), + Vector( { 0 } ), + Vector( { 6 } ) ); } void test_symbolic_bound_maps_max_fixed() @@ -3755,49 +3770,49 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Using x3 = x0 - x1 2x0 - 2x1 <= x7 <= 2x0 - 2x1 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 0, 0, 0, 1 } ), - Vector( { 0, 0, 0, 1 } ), - Vector( { 0, 0 } ), - Vector( { 0, 0 } ) ); - comparePredecessorLayerSymbolicBounds( nlr, - 3, - Vector( { 0, 1 } ), - Vector( { 0, 1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 4, - Vector( { 1 } ), - Vector( { 1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 2 } ), - Vector( { 2 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 0, 2 } ), - Vector( { 0, 2 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 0, 2 } ), - Vector( { 0, 2 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { 2, -2 } ), - Vector( { 2, -2 } ), + Vector( { 0, 0, 0, 1 } ), + Vector( { 0, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + comparePredecessorSymbolicBounds( nlr, + 3, + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), Vector( { 0 } ), Vector( { 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 4, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 2 } ), + Vector( { 2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 0, 2 } ), + Vector( { 0, 2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, 2 } ), + Vector( { 0, 2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 2, -2 } ), + Vector( { 2, -2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); } void test_symbolic_bound_maps_softmax1() @@ -4021,53 +4036,53 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 1 <= x9 <= 1 -1 <= x10 <= -1 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 0.1922, - -0.1830, - -0.0091, - -0.1830, - 0.2078, - -0.0248, - -0.0091, - -0.0248, - 0.0339 } ), - Vector( { 0.1922, - -0.1830, - -0.0091, - -0.1830, - 0.2078, - -0.0248, - -0.0091, - -0.0248, - 0.0339 } ), - Vector( { 0.4243, 0.4481, 0.1277 } ), - Vector( { 0.4243, 0.4480, 0.1277 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 1, 0, 0, 1 } ), - Vector( { 1, 0, 0, 1 } ), - Vector( { 0, 0 } ), - Vector( { 0, 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + comparePredecessorSymbolicBounds( nlr, 2, - 
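+        // Sanity check for the softmax coefficient block in this call: each
+        // column of the 3x3 matrix sums to roughly 0, and the constant terms
+        // sum to roughly 1 ( 0.4243 + 0.4481 + 0.1277 ), as expected for
+        // bounds on outputs that always sum to one.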
Vector( { 1, -1, 1, -1, 1, -1 } ), - Vector( { 1, -1, 1, -1, 1, -1 } ), - Vector( { 0, 0 } ), - Vector( { 0, 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 0, 0, 0, 0, 0, 0 } ), - Vector( { 0, 0, 0, 0, 0, 0 } ), - Vector( { 1, -1 } ), - Vector( { 1, -1 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { 0, 0, 0, 0, 0, 0 } ), - Vector( { 0, 0, 0, 0, 0, 0 } ), - Vector( { 1, -1 } ), - Vector( { 1, -1 } ) ); + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ), + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ), + Vector( { 0.4243, 0.4481, 0.1277 } ), + Vector( { 0.4243, 0.4480, 0.1277 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1, 1, -1, 1, -1 } ), + Vector( { 1, -1, 1, -1, 1, -1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 1, -1 } ), + Vector( { 1, -1 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 1, -1 } ), + Vector( { 1, -1 } ) ); } { Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "er" ); @@ -4264,53 +4279,53 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 1 <= x9 <= 1 -1 <= x10 <= -1 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 0.1922, - -0.1830, - -0.0091, - -0.1830, - 0.2078, - -0.0248, - -0.0091, - -0.0248, - 0.0339 } ), - Vector( { 0.1922, - -0.1830, - -0.0091, - -0.1830, - 0.2078, - -0.0248, - -0.0091, - -0.0248, - 0.0339 } ), - Vector( { 0.4243, 0.4481, 0.1277 } ), - Vector( { 0.4243, 0.4480, 0.1277 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 1, 0, 0, 1 } ), - Vector( { 1, 0, 0, 1 } ), - Vector( { 0, 0 } ), - Vector( { 0, 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 1, -1, 1, -1, 1, -1 } ), - Vector( { 1, -1, 1, -1, 1, -1 } ), - Vector( { 0, 0 } ), - Vector( { 0, 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 0, 0, 0, 0, 0, 0 } ), - Vector( { 0, 0, 0, 0, 0, 0 } ), - Vector( { 1, -1 } ), - Vector( { 1, -1 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { 0, 0, 0, 0, 0, 0 } ), - Vector( { 0, 0, 0, 0, 0, 0 } ), - Vector( { 1, -1 } ), - Vector( { 1, -1 } ) ); + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ), + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ), + Vector( { 0.4243, 0.4481, 0.1277 } ), + Vector( { 0.4243, 0.4480, 0.1277 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1, 1, -1, 1, -1 } ), + Vector( { 1, -1, 1, -1, 1, -1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 1, -1 } ), + Vector( { 1, -1 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 1, -1 } ), + Vector( { 1, -1 } ) ); } } @@ -4603,7 +4618,7 @@ class 
NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 1 <= x15 <= 1 -1 <= x16 <= -1 */ - comparePredecessorLayerSymbolicBounds( + comparePredecessorSymbolicBounds( nlr, 2, Vector( { 0.1155, 0.0000, -0.1017, 0.0000, -0.0138, 0.0000, 0.0177, @@ -4617,32 +4632,32 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { 0.6084, 0.9114, 0.3170, 0.0886, 0.0747 } ), Vector( { 0.6084, 0.9114, 0.3170, 0.0886, 0.0747 } ) ); - compareOutputLayerSymbolicBounds( + compareOutputSymbolicBounds( nlr, 3, Vector( { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 } ), Vector( { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 } ), Vector( { 0, 0, 0, 0 } ), Vector( { 0, 0, 0, 0 } ) ); - compareOutputLayerSymbolicBounds( + compareOutputSymbolicBounds( nlr, 2, Vector( { 1, -1, 0, 0, 0, 0, 1, -1, 1, -1, 0, 0, 0, 0, 1, -1, 1, -1, 0, 0 } ), Vector( { 1, -1, 0, 0, 0, 0, 1, -1, 1, -1, 0, 0, 0, 0, 1, -1, 1, -1, 0, 0 } ), Vector( { 0, 0, 0, 0 } ), Vector( { 0, 0, 0, 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( 20, 0 ), - Vector( 20, 0 ), - Vector( { 1, -1, 1, -1 } ), - Vector( { 1, -1, 1, -1 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( 12, 0 ), - Vector( 12, 0 ), - Vector( { 1, -1, 1, -1 } ), - Vector( { 1, -1, 1, -1 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( 20, 0 ), + Vector( 20, 0 ), + Vector( { 1, -1, 1, -1 } ), + Vector( { 1, -1, 1, -1 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( 12, 0 ), + Vector( 12, 0 ), + Vector( { 1, -1, 1, -1 } ), + Vector( { 1, -1, 1, -1 } ) ); } void test_symbolic_bound_maps_bilinear() @@ -4738,37 +4753,37 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Using x2 = x0 - 2x1, x3 = x0 + x1: -2x0 + 7x1 - 3 <= x5 <= 2x0 - x1 + 1 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { -1, -1 } ), - Vector( { 3, -1 } ), - Vector( { -1 } ), - Vector( { 3 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 1 } ), - Vector( { 1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + comparePredecessorSymbolicBounds( nlr, 2, + Vector( { -1, -1 } ), + Vector( { 3, -1 } ), Vector( { -1 } ), - Vector( { -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { -3, 1 } ), - Vector( { 1, 1 } ), - Vector( { -3 } ), - Vector( { 1 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { -2, 7 } ), - Vector( { 2, -1 } ), - Vector( { -3 } ), - Vector( { 1 } ) ); + Vector( { 3 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { -1 } ), + Vector( { -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { -3, 1 } ), + Vector( { 1, 1 } ), + Vector( { -3 } ), + Vector( { 1 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { -2, 7 } ), + Vector( { 2, -1 } ), + Vector( { -3 } ), + Vector( { 1 } ) ); } void test_parameterised_symbolic_bound_maps_relus_all_active() @@ -4869,37 +4884,37 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Using x2 = 2x0 + 3x1, x3 = x0 + x1: x0 + 2x1 <= x6 <= x0 + 2x1 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 1, 0, 0, 1 } ), - Vector( { 1, 0, 0, 1 } ), - Vector( { 0, 0 } ), - Vector( { 0, 0 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 1 } ), - Vector( { 1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( 
nlr, + comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 1, -1 } ), - Vector( { 1, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 1, -1 } ), - Vector( { 1, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { 1, 2 } ), - Vector( { 1, 2 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 1, 2 } ), + Vector( { 1, 2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); } void test_parameterised_symbolic_bound_maps_relus_active_and_inactive() @@ -5004,37 +5019,37 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Using x3 = x0 + x1: -x0 - x1 <= x6 <= -x0 - x1 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 0, 0, 0, 1 } ), - Vector( { 0, 0, 0, 1 } ), - Vector( { 0, 0 } ), - Vector( { 0, 0 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 1 } ), - Vector( { 1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 1, -1 } ), - Vector( { 1, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 0, -1 } ), - Vector( { 0, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { -1, -1 } ), - Vector( { -1, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); + Vector( { 0, 0, 0, 1 } ), + Vector( { 0, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, -1 } ), + Vector( { 0, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { -1, -1 } ), + Vector( { -1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); } void test_parameterised_symbolic_bound_maps_relus_active_and_not_fixed() @@ -5143,37 +5158,37 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Using x2 = 2x0 + 3x1, x3 = x0 + x1: 0.5 x1 - 7.5 <= x6 <= 0.5x0 + 1.25x1 - 8.25 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 0.5, 0, 0, 1 } ), - Vector( { 0.75, 0, 0, 1 } ), - Vector( { 0, 0 } ), - Vector( { 3, 0 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 1 } ), - Vector( { 1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 1, -1 } ), - Vector( { 1, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 0.5, -1 } ), - Vector( { 0.75, -1 } ), - Vector( { 0 } ), - Vector( { 3 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { 0, 0.5 } ), - Vector( { 0.5, 1.25 } ), - Vector( { -7.5 } ), - Vector( { -8.25 } ) ); + Vector( { 0.5, 0, 0, 1 } ), + Vector( 
{ 0.75, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 3, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0.5, -1 } ), + Vector( { 0.75, -1 } ), + Vector( { 0 } ), + Vector( { 3 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0, 0.5 } ), + Vector( { 0.5, 1.25 } ), + Vector( { -7.5 } ), + Vector( { -8.25 } ) ); } void test_parameterised_symbolic_bound_maps_relus_active_and_externally_fixed() @@ -5280,37 +5295,37 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Using x3 = x0 + x1: -x0 - x1 <= x6 <= -x0 - x1 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 0, 0, 0, 1 } ), - Vector( { 0, 0, 0, 1 } ), - Vector( { 0, 0 } ), - Vector( { 0, 0 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 1 } ), - Vector( { 1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 1, -1 } ), - Vector( { 1, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 0, -1 } ), - Vector( { 0, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { -1, -1 } ), - Vector( { -1, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); + Vector( { 0, 0, 0, 1 } ), + Vector( { 0, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, -1 } ), + Vector( { 0, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { -1, -1 } ), + Vector( { -1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); } void test_parameterised_symbolic_bound_maps_relu_residual1() @@ -5430,55 +5445,55 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Using x1 = x0: 0.75 x0 + 1.75 <= x5 <= -3/14 x0 + 37/7 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 0.5 } ), - Vector( { 0.5 } ), - Vector( { 0 } ), - Vector( { 0.5 } ) ); - comparePredecessorLayerSymbolicBounds( nlr, - 4, - Vector( { 0.5 } ), - Vector( { 0.7143 } ), - Vector( { 0 } ), - Vector( { 0.7143 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 5, - Vector( { 1 } ), - Vector( { 1 } ), + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0.5 } ), + Vector( { 0.5 } ), Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + Vector( { 0.5 } ) ); + comparePredecessorSymbolicBounds( nlr, 4, - Vector( { 3 } ), - Vector( { 3 } ), - Vector( { -2 } ), - Vector( { 4 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 1.5 } ), - Vector( { 2.1429 } ), - Vector( { -2 } ), - Vector( { 6.1429 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 2, - Vector( { -1.5 } ), - Vector( { -2.1429 } ), - Vector( { -2 } ), - Vector( { 10.4286 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 2.25 } ), - Vector( { 1.9286 } ), - Vector( { 0.25 } ), - Vector( { 7.4286 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { 0.75 } ), - Vector( { -0.2143 } ), - Vector( { 1.75 } ), - Vector( 
{ 5.2857 } ) ); + Vector( { 0.5 } ), + Vector( { 0.7143 } ), + Vector( { 0 } ), + Vector( { 0.7143 } ) ); + + compareOutputSymbolicBounds( nlr, + 5, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 4, + Vector( { 3 } ), + Vector( { 3 } ), + Vector( { -2 } ), + Vector( { 4 } ) ); + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1.5 } ), + Vector( { 2.1429 } ), + Vector( { -2 } ), + Vector( { 6.1429 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { -1.5 } ), + Vector( { -2.1429 } ), + Vector( { -2 } ), + Vector( { 10.4286 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 2.25 } ), + Vector( { 1.9286 } ), + Vector( { 0.25 } ), + Vector( { 7.4286 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0.75 } ), + Vector( { -0.2143 } ), + Vector( { 1.75 } ), + Vector( { 5.2857 } ) ); } void test_parameterised_symbolic_bound_maps_relu_residual2() @@ -5609,61 +5624,61 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Using x1 = x0: -1.25 x0 + 1.75 <= x6 <= -31/14 x0 + 37/7 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 0.5 } ), - Vector( { 0.5 } ), - Vector( { 0 } ), - Vector( { 0.5 } ) ); - comparePredecessorLayerSymbolicBounds( nlr, - 4, - Vector( { 0.5 } ), - Vector( { 0.7143 } ), - Vector( { 0 } ), - Vector( { 0.7143 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 6, - Vector( { 1 } ), - Vector( { 1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 5, - Vector( { 1 } ), - Vector( { 1 } ), + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0.5 } ), + Vector( { 0.5 } ), Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + Vector( { 0.5 } ) ); + comparePredecessorSymbolicBounds( nlr, 4, - Vector( { 3 } ), - Vector( { 3 } ), - Vector( { 0 } ), - Vector( { 2 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 1.5 } ), - Vector( { 2.1429 } ), + Vector( { 0.5 } ), + Vector( { 0.7143 } ), Vector( { 0 } ), - Vector( { 4.1429 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 2, - Vector( { -1.5 } ), - Vector( { -2.1429 } ), - Vector( { 2 } ), - Vector( { 6.4286 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { -0.75 } ), - Vector( { -1.0714 } ), - Vector( { 1.25 } ), - Vector( { 6.4286 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { -1.25 } ), - Vector( { -2.2143 } ), - Vector( { 1.75 } ), - Vector( { 5.2857 } ) ); + Vector( { 0.7143 } ) ); + + compareOutputSymbolicBounds( nlr, + 6, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 5, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 4, + Vector( { 3 } ), + Vector( { 3 } ), + Vector( { 0 } ), + Vector( { 2 } ) ); + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1.5 } ), + Vector( { 2.1429 } ), + Vector( { 0 } ), + Vector( { 4.1429 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { -1.5 } ), + Vector( { -2.1429 } ), + Vector( { 2 } ), + Vector( { 6.4286 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { -0.75 } ), + Vector( { -1.0714 } ), + Vector( { 1.25 } ), + Vector( { 6.4286 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { -1.25 } ), + Vector( { -2.2143 } ), + Vector( { 1.75 } ), + Vector( { 5.2857 } ) ); } void test_parameterised_symbolic_bound_maps_relu_reindex() @@ -5824,56 +5839,56 @@ class NetworkLevelReasonerTestSuite : public 
CxxTest::TestSuite 0.5 x0 + 0.5 x1 + 1 <= x10 <= 0.75 x0 + 0.5 x1 + 4.25 0.5 x1 - 0.5 <= x11 <= 0.5 x1 + 1.5 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 0, 0.5, 0.5, 0 } ), - Vector( { 0, 0.5, 0.5, 0 } ), - Vector( { 0, 0 } ), - Vector( { 1, 1 } ) ); - - comparePredecessorLayerSymbolicBounds( nlr, - 4, - Vector( { 0, 0.5, 0.5, 0 } ), - Vector( { 0, 0.75, 0.5, 0 } ), - Vector( { 0, 0 } ), - Vector( { 1, 0.75 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 5, - Vector( { 1, 0, 0, 1 } ), - Vector( { 1, 0, 0, 1 } ), + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0, 0.5, 0.5, 0 } ), + Vector( { 0, 0.5, 0.5, 0 } ), Vector( { 0, 0 } ), - Vector( { 0, 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + Vector( { 1, 1 } ) ); + + comparePredecessorSymbolicBounds( nlr, 4, - Vector( { 1, 1, 1, 0 } ), - Vector( { 1, 1, 1, 0 } ), - Vector( { 1, 0 } ), - Vector( { 1, 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 0.5, 0, 0.5, 0.5 } ), - Vector( { 0.75, 0, 0.5, 0.5 } ), - Vector( { 1, 0 } ), - Vector( { 2.75, 1 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 2, - Vector( { 0, -0.5, 1, 0.5 } ), - Vector( { 0.25, -0.5, 1.25, 0.5 } ), - Vector( { 1, 0 } ), - Vector( { 2.75, 1 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 0.5, 0.25, 0, -0.25 } ), - Vector( { 0.625, 0.25, 0.125, -0.25 } ), - Vector( { 1, -0.5 } ), - Vector( { 4.25, 1.5 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { 0.5, 0, 0.5, 0.5 } ), - Vector( { 0.75, 0, 0.5, 0.5 } ), - Vector( { 1, -0.5 } ), - Vector( { 4.25, 1.5 } ) ); + Vector( { 0, 0.5, 0.5, 0 } ), + Vector( { 0, 0.75, 0.5, 0 } ), + Vector( { 0, 0 } ), + Vector( { 1, 0.75 } ) ); + + compareOutputSymbolicBounds( nlr, + 5, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 4, + Vector( { 1, 1, 1, 0 } ), + Vector( { 1, 1, 1, 0 } ), + Vector( { 1, 0 } ), + Vector( { 1, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 0.5, 0, 0.5, 0.5 } ), + Vector( { 0.75, 0, 0.5, 0.5 } ), + Vector( { 1, 0 } ), + Vector( { 2.75, 1 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 0, -0.5, 1, 0.5 } ), + Vector( { 0.25, -0.5, 1.25, 0.5 } ), + Vector( { 1, 0 } ), + Vector( { 2.75, 1 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0.5, 0.25, 0, -0.25 } ), + Vector( { 0.625, 0.25, 0.125, -0.25 } ), + Vector( { 1, -0.5 } ), + Vector( { 4.25, 1.5 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0.5, 0, 0.5, 0.5 } ), + Vector( { 0.75, 0, 0.5, 0.5 } ), + Vector( { 1, -0.5 } ), + Vector( { 4.25, 1.5 } ) ); } void test_parameterised_symbolic_bound_maps_abs_all_positive() @@ -6021,37 +6036,37 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Using x2 = 2x0 + 3x1, x3 = x0 + x1: x0 + 2x1 <= x6 <= x0 + 2x1 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 1, 0, 0, 1 } ), - Vector( { 1, 0, 0, 1 } ), - Vector( { 0, 0 } ), - Vector( { 0, 0 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 1 } ), - Vector( { 1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 1, -1 } ), - Vector( { 1, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 1, -1 } ), - Vector( { 1, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { 1, 2 } ), - Vector( { 1, 2 } ), - Vector( 
{ 0 } ), - Vector( { 0 } ) ); + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 1, 2 } ), + Vector( { 1, 2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); } void test_parameterised_symbolic_bound_maps_abs_positive_and_negative() @@ -6202,37 +6217,37 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Using x2 = 2x0 + 3x1 - 30, x3 = x0 + x1: -3x0 - 4x1 + 30 <= x6 <= -3x0 - 4x1 + 30 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { -1, 0, 0, 1 } ), - Vector( { -1, 0, 0, 1 } ), - Vector( { 0, 0 } ), - Vector( { 0, 0 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 1 } ), - Vector( { 1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 1, -1 } ), - Vector( { 1, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { -1, -1 } ), - Vector( { -1, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { -3, -4 } ), - Vector( { -3, -4 } ), - Vector( { 30 } ), - Vector( { 30 } ) ); + Vector( { -1, 0, 0, 1 } ), + Vector( { -1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { -1, -1 } ), + Vector( { -1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { -3, -4 } ), + Vector( { -3, -4 } ), + Vector( { 30 } ), + Vector( { 30 } ) ); } void test_parameterised_symbolic_bound_maps_absolute_values_positive_and_not_fixed() @@ -6387,37 +6402,37 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Using x3 = x0 + x1: -x0 - x1 <= x6 <= -x0 - x1 + 12 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 0, 0, 0, 1 } ), - Vector( { 0, 0, 0, 1 } ), - Vector( { 0, 0 } ), - Vector( { 12, 0 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 1 } ), - Vector( { 1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 1, -1 } ), - Vector( { 1, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 0, -1 } ), - Vector( { 0, -1 } ), - Vector( { 0 } ), - Vector( { 12 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { -1, -1 } ), - Vector( { -1, -1 } ), - Vector( { 0 } ), - Vector( { 12 } ) ); + Vector( { 0, 0, 0, 1 } ), + Vector( { 0, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 12, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, 
+ Vector( { 0, -1 } ), + Vector( { 0, -1 } ), + Vector( { 0 } ), + Vector( { 12 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { -1, -1 } ), + Vector( { -1, -1 } ), + Vector( { 0 } ), + Vector( { 12 } ) ); } void test_parameterised_symbolic_bound_maps_absolute_values_active_and_externally_fixed() @@ -6574,37 +6589,37 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Using x3 = x0 + x1: - x0 - x1 + 3 <= x6 <= - x0 - x1 + 3 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { -1, 0, 0, 1 } ), - Vector( { -1, 0, 0, 1 } ), - Vector( { 0, 0 } ), - Vector( { 0, 0 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 1 } ), - Vector( { 1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 1, -1 } ), - Vector( { 1, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 0, -1 } ), - Vector( { 0, -1 } ), - Vector( { 3 } ), - Vector( { 3 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { -1, -1 } ), - Vector( { -1, -1 } ), - Vector( { 3 } ), - Vector( { 3 } ) ); + Vector( { -1, 0, 0, 1 } ), + Vector( { -1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, -1 } ), + Vector( { 0, -1 } ), + Vector( { 3 } ), + Vector( { 3 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { -1, -1 } ), + Vector( { -1, -1 } ), + Vector( { 3 } ), + Vector( { 3 } ) ); } void test_parameterised_symbolic_bound_maps_signs_positive_and_not_fixed() @@ -6765,37 +6780,37 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Using x2 = 2x0 + 3x1 - 15: 1/6 x0 + 1/4 x1 - 3.25 <= x6 <= 0.5 x0 + 0.75x1 - 3.75 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 0.0833, 0, 0, 0 } ), - Vector( { 0.25, 0, 0, 0 } ), - Vector( { -1, 1 } ), - Vector( { 1, 1 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 1 } ), - Vector( { 1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 1, -1 } ), - Vector( { 1, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 0.0833, 0 } ), - Vector( { 0.25, 0 } ), - Vector( { -2 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { 0.1667, 0.25 } ), - Vector( { 0.5, 0.75 } ), - Vector( { -3.25 } ), - Vector( { -3.75 } ) ); + Vector( { 0.0833, 0, 0, 0 } ), + Vector( { 0.25, 0, 0, 0 } ), + Vector( { -1, 1 } ), + Vector( { 1, 1 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0.0833, 0 } ), + Vector( { 0.25, 0 } ), + Vector( { -2 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0.1667, 0.25 } ), + Vector( { 0.5, 0.75 } ), + Vector( { -3.25 } ), + Vector( { -3.75 } ) ); } void test_parameterised_symbolic_bound_maps_signs_active_and_externally_fixed() @@ -6946,37 +6961,37 @@ class 
NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Layer 0: -2 <= x6 <= -2 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 0, 0, 0, 0 } ), - Vector( { 0, 0, 0, 0 } ), - Vector( { -1, 1 } ), - Vector( { -1, 1 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 1 } ), - Vector( { 1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 1, -1 } ), - Vector( { 1, -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 0, 0 } ), - Vector( { 0, 0 } ), - Vector( { -2 } ), - Vector( { -2 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { 0, 0 } ), - Vector( { 0, 0 } ), - Vector( { -2 } ), - Vector( { -2 } ) ); + Vector( { 0, 0, 0, 0 } ), + Vector( { 0, 0, 0, 0 } ), + Vector( { -1, 1 } ), + Vector( { -1, 1 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1 } ), + Vector( { 1, -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, 0 } ), + Vector( { 0, 0 } ), + Vector( { -2 } ), + Vector( { -2 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0, 0 } ), + Vector( { 0, 0 } ), + Vector( { -2 } ), + Vector( { -2 } ) ); } void test_parameterised_symbolic_bound_maps_leaky_relu() @@ -7156,56 +7171,56 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 0.72 x0 + 0.72 x1 + 1 <= x10 <= 0.912 x0 + 0.72 x1 + 3.688 0.72 x1 - 0.48 <= x11 <= 0.72 x1 + 1.28 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 0.6, 0, 0, 0.6 } ), - Vector( { 0.6, 0, 0, 0.6 } ), - Vector( { 0, 0 } ), - Vector( { 0.8, 0.8 } ) ); - - comparePredecessorLayerSymbolicBounds( nlr, - 4, - Vector( { 0.6, 0, 0, 0.6 } ), - Vector( { 0.76, 0, 0, 0.6 } ), - Vector( { 0, 0 } ), - Vector( { 0.672, 0.8 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 5, - Vector( { 1, 0, 0, 1 } ), - Vector( { 1, 0, 0, 1 } ), + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0.6, 0, 0, 0.6 } ), + Vector( { 0.6, 0, 0, 0.6 } ), Vector( { 0, 0 } ), - Vector( { 0, 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + Vector( { 0.8, 0.8 } ) ); + + comparePredecessorSymbolicBounds( nlr, 4, - Vector( { 1, 0, 1, 1 } ), - Vector( { 1, 0, 1, 1 } ), - Vector( { 1, 0 } ), - Vector( { 1, 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 0.6, 0, 0.6, 0.6 } ), - Vector( { 0.76, 0, 0.6, 0.6 } ), - Vector( { 1, 0 } ), - Vector( { 2.472, 0.8 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 2, - Vector( { 1.2, 0.6, 0, -0.6 } ), - Vector( { 1.36, 0.6, 0.16, -0.6 } ), - Vector( { 1, 0 } ), - Vector( { 2.472, 0.8 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 0.72, 0.36, 0, -0.36 } ), - Vector( { 0.816, 0.36, 0.096, -0.36 } ), - Vector( { 1, -0.48 } ), - Vector( { 3.688, 1.28 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { 0.72, 0, 0.72, 0.72 } ), - Vector( { 0.912, 0, 0.72, 0.72 } ), - Vector( { 1, -0.48 } ), - Vector( { 3.688, 1.28 } ) ); + Vector( { 0.6, 0, 0, 0.6 } ), + Vector( { 0.76, 0, 0, 0.6 } ), + Vector( { 0, 0 } ), + Vector( { 0.672, 0.8 } ) ); + + compareOutputSymbolicBounds( nlr, + 5, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 4, + Vector( { 1, 0, 1, 1 } ), + Vector( { 1, 0, 1, 1 } ), + Vector( { 1, 0 } ), + 
Vector( { 1, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 0.6, 0, 0.6, 0.6 } ), + Vector( { 0.76, 0, 0.6, 0.6 } ), + Vector( { 1, 0 } ), + Vector( { 2.472, 0.8 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1.2, 0.6, 0, -0.6 } ), + Vector( { 1.36, 0.6, 0.16, -0.6 } ), + Vector( { 1, 0 } ), + Vector( { 2.472, 0.8 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0.72, 0.36, 0, -0.36 } ), + Vector( { 0.816, 0.36, 0.096, -0.36 } ), + Vector( { 1, -0.48 } ), + Vector( { 3.688, 1.28 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0.72, 0, 0.72, 0.72 } ), + Vector( { 0.912, 0, 0.72, 0.72 } ), + Vector( { 1, -0.48 } ), + Vector( { 3.688, 1.28 } ) ); } void test_parameterised_symbolic_bound_maps_sigmoids_and_round() @@ -7358,49 +7373,49 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 0.2100 x0 + 0.1584 <= x8 <= 0.2100 x0 + 1.8416 0.2100 x1 - 0.8416 <= x9 <= 0.2100 x1 + 0.8516 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 0.1050, 0, 0, 0.1050 } ), - Vector( { 0.1050, 0, 0, 0.1050 } ), - Vector( { 0.3292, 0.3292 } ), - Vector( { 0.6708, 0.6708 } ) ); - comparePredecessorLayerSymbolicBounds( nlr, - 4, - Vector( { 1, 0, 0, 1 } ), - Vector( { 1, 0, 0, 1 } ), - Vector( { -0.5, -0.5 } ), - Vector( { 0.5, 0.5 } ) ); - - compareOutputLayerSymbolicBounds( nlr, + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0.1050, 0, 0, 0.1050 } ), + Vector( { 0.1050, 0, 0, 0.1050 } ), + Vector( { 0.3292, 0.3292 } ), + Vector( { 0.6708, 0.6708 } ) ); + comparePredecessorSymbolicBounds( nlr, 4, Vector( { 1, 0, 0, 1 } ), Vector( { 1, 0, 0, 1 } ), - Vector( { 0, 0 } ), - Vector( { 0, 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 1, 0, 0, 1 } ), - Vector( { 1, 0, 0, 1 } ), Vector( { -0.5, -0.5 } ), Vector( { 0.5, 0.5 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 2, - Vector( { 1, 1, 1, -1 } ), - Vector( { 1, 1, 1, -1 } ), - Vector( { -0.5, -0.5 } ), - Vector( { 0.5, 0.5 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 0.1050, 0.1050, 0.1050, -0.1050 } ), - Vector( { 0.1050, 0.1050, 0.1050, -0.1050 } ), - Vector( { 0.1584, -0.8416 } ), - Vector( { 1.8416, 0.8416 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { 0.2100, 0, 0, 0.2100 } ), - Vector( { 0.2100, 0, 0, 0.2100 } ), - Vector( { 0.1584, -0.8416 } ), - Vector( { 1.8416, 0.8416 } ) ); + + compareOutputSymbolicBounds( nlr, + 4, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { -0.5, -0.5 } ), + Vector( { 0.5, 0.5 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, 1, 1, -1 } ), + Vector( { 1, 1, 1, -1 } ), + Vector( { -0.5, -0.5 } ), + Vector( { 0.5, 0.5 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0.1050, 0.1050, 0.1050, -0.1050 } ), + Vector( { 0.1050, 0.1050, 0.1050, -0.1050 } ), + Vector( { 0.1584, -0.8416 } ), + Vector( { 1.8416, 0.8416 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0.2100, 0, 0, 0.2100 } ), + Vector( { 0.2100, 0, 0, 0.2100 } ), + Vector( { 0.1584, -0.8416 } ), + Vector( { 1.8416, 0.8416 } ) ); } void test_parameterised_symbolic_bound_maps_max_not_fixed() @@ -7523,49 +7538,49 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Using x2 = x0 + x1: x0 + x1 <= x7 <= 6 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 0.5, 0, 0, 0.5 } ), - Vector( { 0.6, 0, 0, 0.4 } ), - 
Vector( { 0, 0 } ), - Vector( { 1.2, 1.2 } ) ); - comparePredecessorLayerSymbolicBounds( nlr, - 3, - Vector( { 1, 0 } ), - Vector( { 0, 0 } ), - Vector( { 0 } ), - Vector( { 3 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 4, - Vector( { 1 } ), - Vector( { 1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 2 } ), - Vector( { 2 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 2, 0 } ), + Vector( { 0.5, 0, 0, 0.5 } ), + Vector( { 0.6, 0, 0, 0.4 } ), Vector( { 0, 0 } ), - Vector( { 0 } ), - Vector( { 6 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, + Vector( { 1.2, 1.2 } ) ); + comparePredecessorSymbolicBounds( nlr, + 3, Vector( { 1, 0 } ), Vector( { 0, 0 } ), Vector( { 0 } ), - Vector( { 6 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { 1, 1 } ), - Vector( { 0, 0 } ), - Vector( { 0 } ), - Vector( { 6 } ) ); + Vector( { 3 } ) ); + + compareOutputSymbolicBounds( nlr, + 4, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 2 } ), + Vector( { 2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 2, 0 } ), + Vector( { 0, 0 } ), + Vector( { 0 } ), + Vector( { 6 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 1, 0 } ), + Vector( { 0, 0 } ), + Vector( { 0 } ), + Vector( { 6 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 1, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0 } ), + Vector( { 6 } ) ); } void test_parameterised_symbolic_bound_maps_max_fixed() @@ -7681,49 +7696,49 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Using x3 = x0 - x1 2x0 - 2x1 <= x7 <= 2x0 - 2x1 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 0, 0, 0, 1 } ), - Vector( { 0, 0, 0, 1 } ), - Vector( { 0, 0 } ), - Vector( { 0, 0 } ) ); - comparePredecessorLayerSymbolicBounds( nlr, - 3, - Vector( { 0, 1 } ), - Vector( { 0, 1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 4, - Vector( { 1 } ), - Vector( { 1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 2 } ), - Vector( { 2 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 0, 2 } ), - Vector( { 0, 2 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 0, 2 } ), - Vector( { 0, 2 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { 2, -2 } ), - Vector( { 2, -2 } ), + Vector( { 0, 0, 0, 1 } ), + Vector( { 0, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + comparePredecessorSymbolicBounds( nlr, + 3, + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), Vector( { 0 } ), Vector( { 0 } ) ); + + compareOutputSymbolicBounds( nlr, + 4, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 2 } ), + Vector( { 2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 0, 2 } ), + Vector( { 0, 2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, 2 } ), + Vector( { 0, 2 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 2, -2 } ), + Vector( { 2, -2 
} ), + Vector( { 0 } ), + Vector( { 0 } ) ); } void test_parameterised_symbolic_bound_maps_softmax1() @@ -7953,53 +7968,53 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 1 <= x9 <= 1 -1 <= x10 <= -1 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 0.1922, - -0.1830, - -0.0091, - -0.1830, - 0.2078, - -0.0248, - -0.0091, - -0.0248, - 0.0339 } ), - Vector( { 0.1922, - -0.1830, - -0.0091, - -0.1830, - 0.2078, - -0.0248, - -0.0091, - -0.0248, - 0.0339 } ), - Vector( { 0.4243, 0.4481, 0.1277 } ), - Vector( { 0.4243, 0.4480, 0.1277 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 1, 0, 0, 1 } ), - Vector( { 1, 0, 0, 1 } ), - Vector( { 0, 0 } ), - Vector( { 0, 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 1, -1, 1, -1, 1, -1 } ), - Vector( { 1, -1, 1, -1, 1, -1 } ), - Vector( { 0, 0 } ), - Vector( { 0, 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 0, 0, 0, 0, 0, 0 } ), - Vector( { 0, 0, 0, 0, 0, 0 } ), - Vector( { 1, -1 } ), - Vector( { 1, -1 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { 0, 0, 0, 0, 0, 0 } ), - Vector( { 0, 0, 0, 0, 0, 0 } ), - Vector( { 1, -1 } ), - Vector( { 1, -1 } ) ); + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ), + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ), + Vector( { 0.4243, 0.4481, 0.1277 } ), + Vector( { 0.4243, 0.4480, 0.1277 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1, 1, -1, 1, -1 } ), + Vector( { 1, -1, 1, -1, 1, -1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 1, -1 } ), + Vector( { 1, -1 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 1, -1 } ), + Vector( { 1, -1 } ) ); } { Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "er" ); @@ -8199,53 +8214,53 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 1 <= x9 <= 1 -1 <= x10 <= -1 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 0.1922, - -0.1830, - -0.0091, - -0.1830, - 0.2078, - -0.0248, - -0.0091, - -0.0248, - 0.0339 } ), - Vector( { 0.1922, - -0.1830, - -0.0091, - -0.1830, - 0.2078, - -0.0248, - -0.0091, - -0.0248, - 0.0339 } ), - Vector( { 0.4243, 0.4481, 0.1277 } ), - Vector( { 0.4243, 0.4480, 0.1277 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 1, 0, 0, 1 } ), - Vector( { 1, 0, 0, 1 } ), - Vector( { 0, 0 } ), - Vector( { 0, 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 1, -1, 1, -1, 1, -1 } ), - Vector( { 1, -1, 1, -1, 1, -1 } ), - Vector( { 0, 0 } ), - Vector( { 0, 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { 0, 0, 0, 0, 0, 0 } ), - Vector( { 0, 0, 0, 0, 0, 0 } ), - Vector( { 1, -1 } ), - Vector( { 1, -1 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { 0, 0, 0, 0, 0, 0 } ), - Vector( { 0, 0, 0, 0, 0, 0 } ), - Vector( { 1, -1 } ), - Vector( { 1, -1 } ) ); + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ), + Vector( { 0.1922, + -0.1830, + -0.0091, + 
-0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ), + Vector( { 0.4243, 0.4481, 0.1277 } ), + Vector( { 0.4243, 0.4480, 0.1277 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, -1, 1, -1, 1, -1 } ), + Vector( { 1, -1, 1, -1, 1, -1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 1, -1 } ), + Vector( { 1, -1 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 0, 0, 0, 0, 0, 0 } ), + Vector( { 1, -1 } ), + Vector( { 1, -1 } ) ); } } @@ -8541,7 +8556,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 1 <= x15 <= 1 -1 <= x16 <= -1 */ - comparePredecessorLayerSymbolicBounds( + comparePredecessorSymbolicBounds( nlr, 2, Vector( { 0.1155, 0.0000, -0.1017, 0.0000, -0.0138, 0.0000, 0.0177, @@ -8555,32 +8570,32 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { 0.6084, 0.9114, 0.3170, 0.0886, 0.0747 } ), Vector( { 0.6084, 0.9114, 0.3170, 0.0886, 0.0747 } ) ); - compareOutputLayerSymbolicBounds( + compareOutputSymbolicBounds( nlr, 3, Vector( { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 } ), Vector( { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 } ), Vector( { 0, 0, 0, 0 } ), Vector( { 0, 0, 0, 0 } ) ); - compareOutputLayerSymbolicBounds( + compareOutputSymbolicBounds( nlr, 2, Vector( { 1, -1, 0, 0, 0, 0, 1, -1, 1, -1, 0, 0, 0, 0, 1, -1, 1, -1, 0, 0 } ), Vector( { 1, -1, 0, 0, 0, 0, 1, -1, 1, -1, 0, 0, 0, 0, 1, -1, 1, -1, 0, 0 } ), Vector( { 0, 0, 0, 0 } ), Vector( { 0, 0, 0, 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( 20, 0 ), - Vector( 20, 0 ), - Vector( { 1, -1, 1, -1 } ), - Vector( { 1, -1, 1, -1 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( 12, 0 ), - Vector( 12, 0 ), - Vector( { 1, -1, 1, -1 } ), - Vector( { 1, -1, 1, -1 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( 20, 0 ), + Vector( 20, 0 ), + Vector( { 1, -1, 1, -1 } ), + Vector( { 1, -1, 1, -1 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( 12, 0 ), + Vector( 12, 0 ), + Vector( { 1, -1, 1, -1 } ), + Vector( { 1, -1, 1, -1 } ) ); } void test_parameterised_symbolic_bound_maps_bilinear() @@ -8685,37 +8700,37 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -3.5 x0 - 0.5 x1 - 4.5 <= x5 <= -3.5 x0 - 0.5 x1 + 9.5 */ - comparePredecessorLayerSymbolicBounds( nlr, - 2, - Vector( { 1, 2.5 } ), - Vector( { 1, 2.5 } ), - Vector( { -9.5 } ), - Vector( { 4.5 } ) ); - - compareOutputLayerSymbolicBounds( nlr, - 3, - Vector( { 1 } ), - Vector( { 1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, + comparePredecessorSymbolicBounds( nlr, 2, - Vector( { -1 } ), - Vector( { -1 } ), - Vector( { 0 } ), - Vector( { 0 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 1, - Vector( { -1, -2.5 } ), - Vector( { -1, -2.5 } ), - Vector( { -4.5 } ), - Vector( { 9.5 } ) ); - compareOutputLayerSymbolicBounds( nlr, - 0, - Vector( { -3.5, -0.5 } ), - Vector( { -3.5, -0.5 } ), - Vector( { -4.5 } ), - Vector( { 9.5 } ) ); + Vector( { 1, 2.5 } ), + Vector( { 1, 2.5 } ), + Vector( { -9.5 } ), + Vector( { 4.5 } ) ); + + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1 } ), + Vector( { 1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { -1 
} ), + Vector( { -1 } ), + Vector( { 0 } ), + Vector( { 0 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { -1, -2.5 } ), + Vector( { -1, -2.5 } ), + Vector( { -4.5 } ), + Vector( { 9.5 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { -3.5, -0.5 } ), + Vector( { -3.5, -0.5 } ), + Vector( { -4.5 } ), + Vector( { 9.5 } ) ); } bool boundsEqual( const List &bounds, const List &expectedBounds ) @@ -8755,54 +8770,63 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite } } - void comparePredecessorLayerSymbolicBounds( NLR::NetworkLevelReasoner &nlr, - unsigned layerIndex, - const Vector &symbolicLb, - const Vector &symbolicUb, - const Vector &symbolicLowerBias, - const Vector &symbolicUpperBias ) + void compareOutputSymbolicBounds( NLR::NetworkLevelReasoner &nlr, + unsigned layerIndex, + const Vector &symbolicLb, + const Vector &symbolicUb, + const Vector &symbolicLowerBias, + const Vector &symbolicUpperBias ) { - Vector predecessorLayerSymbolicLb; - Vector predecessorLayerSymbolicUb; - Vector predecessorLayerSymbolicLowerBias; - Vector predecessorLayerSymbolicUpperBias; - TS_ASSERT_THROWS_NOTHING( predecessorLayerSymbolicLb = - nlr.getSymbolicLbInTermsOfPredecessor( layerIndex ) ); - TS_ASSERT_THROWS_NOTHING( predecessorLayerSymbolicUb = - nlr.getSymbolicUbInTermsOfPredecessor( layerIndex ) ); - TS_ASSERT_THROWS_NOTHING( predecessorLayerSymbolicLowerBias = - nlr.getSymbolicLowerBiasInTermsOfPredecessor( layerIndex ) ); - TS_ASSERT_THROWS_NOTHING( predecessorLayerSymbolicUpperBias = - nlr.getSymbolicUpperBiasInTermsOfPredecessor( layerIndex ) ); - TS_ASSERT( compareVectors( predecessorLayerSymbolicLb, symbolicLb ) ); - TS_ASSERT( compareVectors( predecessorLayerSymbolicUb, symbolicUb ) ); - TS_ASSERT( compareVectors( predecessorLayerSymbolicLowerBias, symbolicLowerBias ) ); - TS_ASSERT( compareVectors( predecessorLayerSymbolicUpperBias, symbolicUpperBias ) ); + Vector outputSymbolicLb; + Vector outputSymbolicUb; + Vector outputSymbolicLowerBias; + Vector outputSymbolicUpperBias; + TS_ASSERT_THROWS_NOTHING( outputSymbolicLb = nlr.getOutputSymbolicLb( layerIndex ) ); + TS_ASSERT_THROWS_NOTHING( outputSymbolicUb = nlr.getOutputSymbolicUb( layerIndex ) ); + TS_ASSERT_THROWS_NOTHING( outputSymbolicLowerBias = + nlr.getOutputSymbolicLowerBias( layerIndex ) ); + TS_ASSERT_THROWS_NOTHING( outputSymbolicUpperBias = + nlr.getOutputSymbolicUpperBias( layerIndex ) ); + TS_ASSERT( compareVectors( outputSymbolicLb, symbolicLb ) ); + TS_ASSERT( compareVectors( outputSymbolicUb, symbolicUb ) ); + TS_ASSERT( compareVectors( outputSymbolicLowerBias, symbolicLowerBias ) ); + TS_ASSERT( compareVectors( outputSymbolicUpperBias, symbolicUpperBias ) ); } - void compareOutputLayerSymbolicBounds( NLR::NetworkLevelReasoner &nlr, + void comparePredecessorSymbolicBounds( NLR::NetworkLevelReasoner &nlr, unsigned layerIndex, const Vector &symbolicLb, const Vector &symbolicUb, const Vector &symbolicLowerBias, const Vector &symbolicUpperBias ) { - Vector outputLayerSymbolicLb; - Vector outputLayerSymbolicUb; - Vector outputLayerSymbolicLowerBias; - Vector outputLayerSymbolicUpperBias; - TS_ASSERT_THROWS_NOTHING( outputLayerSymbolicLb = - nlr.getOutputLayerSymbolicLb( layerIndex ) ); - TS_ASSERT_THROWS_NOTHING( outputLayerSymbolicUb = - nlr.getOutputLayerSymbolicUb( layerIndex ) ); - TS_ASSERT_THROWS_NOTHING( outputLayerSymbolicLowerBias = - nlr.getOutputLayerSymbolicLowerBias( layerIndex ) ); - TS_ASSERT_THROWS_NOTHING( outputLayerSymbolicUpperBias = - nlr.getOutputLayerSymbolicUpperBias( 
layerIndex ) );
-        TS_ASSERT( compareVectors( outputLayerSymbolicLb, symbolicLb ) );
-        TS_ASSERT( compareVectors( outputLayerSymbolicUb, symbolicUb ) );
-        TS_ASSERT( compareVectors( outputLayerSymbolicLowerBias, symbolicLowerBias ) );
-        TS_ASSERT( compareVectors( outputLayerSymbolicUpperBias, symbolicUpperBias ) );
+        Vector<double> predecessorSymbolicLb;
+        Vector<double> predecessorSymbolicUb;
+        Vector<double> predecessorSymbolicLowerBias;
+        Vector<double> predecessorSymbolicUpperBias;
+        TS_ASSERT_THROWS_NOTHING( predecessorSymbolicLb =
+                                      nlr.getPredecessorSymbolicLb( layerIndex ) );
+        TS_ASSERT_THROWS_NOTHING( predecessorSymbolicUb =
+                                      nlr.getPredecessorSymbolicUb( layerIndex ) );
+        TS_ASSERT_THROWS_NOTHING( predecessorSymbolicLowerBias =
+                                      nlr.getPredecessorSymbolicLowerBias( layerIndex ) );
+        TS_ASSERT_THROWS_NOTHING( predecessorSymbolicUpperBias =
+                                      nlr.getPredecessorSymbolicUpperBias( layerIndex ) );
+        TS_ASSERT( compareVectors( predecessorSymbolicLb, symbolicLb ) );
+        TS_ASSERT( compareVectors( predecessorSymbolicUb, symbolicUb ) );
+        TS_ASSERT( compareVectors( predecessorSymbolicLowerBias, symbolicLowerBias ) );
+        TS_ASSERT( compareVectors( predecessorSymbolicUpperBias, symbolicUpperBias ) );
+    }
+
+    void comparePMNRScores( NLR::NetworkLevelReasoner &nlr,
+                            const Map<NLR::NeuronIndex, double> &neuronScores )
+    {
+        double PMNRScore;
+        for ( const auto &pair : neuronScores )
+        {
+            TS_ASSERT_THROWS_NOTHING( PMNRScore = nlr.getPMNRScore( pair.first ) );
+            TS_ASSERT( FloatUtils::areEqual( PMNRScore, pair.second, 0.0001 ) );
+        }
     }
 
    bool compareVectors( const Vector<double> &vectorA, const Vector<double> &vectorB )

From 4a00ed22a95e5e9c30540596f7c21e205770df9f Mon Sep 17 00:00:00 2001
From: ido-shm-uel
Date: Sun, 24 Aug 2025 21:59:20 +0300
Subject: [PATCH 70/81] Fixed changes in nlr/CMakeLists.txt.

---
 src/nlr/CMakeLists.txt | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/nlr/CMakeLists.txt b/src/nlr/CMakeLists.txt
index ef89c9492d..9a23820e27 100644
--- a/src/nlr/CMakeLists.txt
+++ b/src/nlr/CMakeLists.txt
@@ -22,7 +22,6 @@ network_level_reasoner_add_unit_test(PMNRSelection)
 
 if (${ENABLE_GUROBI})
   network_level_reasoner_add_unit_test(LPRelaxation)
-  network_level_reasoner_add_unit_test(PMNR)
 endif()
 
 if (${BUILD_PYTHON})

From 146563fb11560a4a99402623a7aea52ba6838bbb Mon Sep 17 00:00:00 2001
From: ido-shm-uel
Date: Sun, 24 Aug 2025 22:03:47 +0300
Subject: [PATCH 71/81] Revert last commit.

---
 src/nlr/CMakeLists.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/nlr/CMakeLists.txt b/src/nlr/CMakeLists.txt
index 9a23820e27..ef89c9492d 100644
--- a/src/nlr/CMakeLists.txt
+++ b/src/nlr/CMakeLists.txt
@@ -22,6 +22,7 @@ network_level_reasoner_add_unit_test(PMNRSelection)
 
 if (${ENABLE_GUROBI})
   network_level_reasoner_add_unit_test(LPRelaxation)
+  network_level_reasoner_add_unit_test(PMNR)
 endif()
 
 if (${BUILD_PYTHON})

From bd11626130f8a372a0074d57418a87ff180b13c8 Mon Sep 17 00:00:00 2001
From: ido-shm-uel
Date: Wed, 17 Sep 2025 20:55:55 +0300
Subject: [PATCH 72/81] Correcting SBT, LP for Sigmoid, Bilinear, Softmax.

SBT for Sigmoid: Corrected the criterion for a fixed Sigmoid neuron.
SBT for Softmax: Symbolic bounds are now stored correctly in
_symbolicLb, _symbolicUb.
SBT for Bilinear: Fixed the calculation of the symbolic lower/upper
bias, and fixed the mistaken assumption that both source neurons come
from the same layer.
Parameterised SBT for Bilinear: Fixed the calculation of the symbolic
lower/upper bias, and fixed the mistaken assumption that both source
neurons come from the same layer.
LP relaxation for Bilinear: Fixed the mistaken assumption that both
source neurons come from the same layer.
Parameterised LP relaxation for Bilinear: Fixed the mistaken assumption
that both source neurons come from the same layer.
Minor changes to the code of the PreimageApproximation optimization
function: size_t -> unsigned, define the sign variable at the start of
the function, use the Vector( double, unsigned ) constructor and
std::min/std::max.
---
 src/nlr/LPFormulator.cpp | 195 +++++---------
 src/nlr/Layer.cpp        | 563 +++++++++++++++++++++++++++------------
 2 files changed, 458 insertions(+), 300 deletions(-)

diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp
index 67f2725155..84447306f9 100644
--- a/src/nlr/LPFormulator.cpp
+++ b/src/nlr/LPFormulator.cpp
@@ -248,11 +248,9 @@ void LPFormulator::optimizeBoundsWithIncrementalLpRelaxation( const Map<unsign
 
-void LPFormulator::optimizeBoundsWithLpRelaxation(
-    const Map<unsigned, Layer *> &layers,
-    bool backward,
-    const Map<unsigned, Vector<double>> &layerIndicesToParameters,
-    const Vector<PolygonalTightening> &polygonalTightenings )
+void LPFormulator::optimizeBoundsWithLpRelaxation( const Map<unsigned, Layer *> &layers,
+                                                   bool backward,
+                                                   const Vector<double> &coeffs )
 {
     unsigned numberOfWorkers = Options::get()->getInt( Options::NUM_WORKERS );
@@ -305,8 +303,7 @@ void LPFormulator::optimizeBoundsWithLpRelaxation( const Map<unsigned, Layer *> &layers,
                                                       &solverToIndex );
 
         // optimize every neuron of layer
-        optimizeBoundsOfNeuronsWithLpRelaxation(
-            argument, backward, layerIndicesToParameters, polygonalTightenings );
+        optimizeBoundsOfNeuronsWithLpRelaxation( argument, backward, coeffs );
 
         LPFormulator_LOG( Stringf( "Tightening bound for layer %u - done", layerIndex ).ascii() );
     }
@@ -340,6 +337,13 @@ void LPFormulator::optimizeBoundsWithLpRelaxation( const Map<unsigned, Layer *> &layers,
         throw InfeasibleQueryException();
 }
 
+void LPFormulator::optimizeBoundsWithPreimageApproximation( Map<unsigned, Layer *> &layers )
+{
+    const Vector<double> &optimal_coeffs = layers[0]->OptimalParameterisedSymbolicBoundTightening();
+    optimizeBoundsWithLpRelaxation( layers, false, optimal_coeffs );
+    optimizeBoundsWithLpRelaxation( layers, true, optimal_coeffs );
+}
+
 void LPFormulator::optimizeBoundsOfOneLayerWithLpRelaxation( const Map<unsigned, Layer *> &layers,
                                                              unsigned targetIndex )
 {
@@ -413,11 +417,9 @@ void LPFormulator::optimizeBoundsOfOneLayerWithLpRelaxation( const Map<unsign
 
-void LPFormulator::optimizeBoundsOfNeuronsWithLpRelaxation(
-    ThreadArgument &args,
-    bool backward,
-    const Map<unsigned, Vector<double>> &layerIndicesToParameters,
-    const Vector<PolygonalTightening> &polygonalTightenings )
+void LPFormulator::optimizeBoundsOfNeuronsWithLpRelaxation( ThreadArgument &args,
+                                                            bool backward,
+                                                            const Vector<double> &coeffs )
 {
     unsigned numberOfWorkers = Options::get()->getInt( Options::NUM_WORKERS );
@@ -530,17 +532,9 @@
             mtx.lock();
             if ( backward )
-                createLPRelaxationAfter( layers,
-                                         *freeSolver,
-                                         lastIndexOfRelaxation,
-                                         layerIndicesToParameters,
-                                         polygonalTightenings );
+                createLPRelaxationAfter( layers, *freeSolver, lastIndexOfRelaxation, coeffs );
             else
-                createLPRelaxation( layers,
-                                    *freeSolver,
-                                    lastIndexOfRelaxation,
-                                    layerIndicesToParameters,
-                                    polygonalTightenings );
+                createLPRelaxation( layers, *freeSolver, lastIndexOfRelaxation, coeffs );
             mtx.unlock();
 
             // spawn a thread to tighten the bounds for the current variable
@@ -660,12 +654,10 @@ void LPFormulator::tightenSingleVariableBoundsWithLPRelaxation( ThreadArgument &
     }
 }
 
-void LPFormulator::createLPRelaxation(
-    const Map<unsigned, Layer *> &layers,
-    GurobiWrapper &gurobi,
-    unsigned lastLayer,
-    const Map<unsigned, Vector<double>> &layerIndicesToParameters,
-    const Vector<PolygonalTightening> &polygonalTightenings )
+void LPFormulator::createLPRelaxation( const Map<unsigned, Layer *> &layers,
+                                       GurobiWrapper &gurobi,
+                                       unsigned lastLayer,
+                                       const Vector<double> &coeffs )
 {
     for ( const auto &layer : layers )
     {
@@ -673,23 +665,22 @@ void LPFormulator::createLPRelaxation( const Map<unsigned, Layer *> &layers,
         if ( currentLayerIndex > lastLayer )
             continue;
 
-        if ( layerIndicesToParameters.empty() )
+        if ( coeffs.empty() )
addLayerToModel( gurobi, layer.second, false ); else { + Map> layerIndicesToParameters = + layer.second->getParametersForLayers( layers, coeffs ); const Vector ¤tLayerCoeffs = layerIndicesToParameters[currentLayerIndex]; addLayerToParameterisedModel( gurobi, layer.second, false, currentLayerCoeffs ); } } - addPolyognalTighteningsToLpRelaxation( gurobi, layers, 0, lastLayer, polygonalTightenings ); } -void LPFormulator::createLPRelaxationAfter( - const Map &layers, - GurobiWrapper &gurobi, - unsigned firstLayer, - const Map> &layerIndicesToParameters, - const Vector &polygonalTightenings ) +void LPFormulator::createLPRelaxationAfter( const Map &layers, + GurobiWrapper &gurobi, + unsigned firstLayer, + const Vector &coeffs ) { unsigned depth = GlobalConfiguration::BACKWARD_BOUND_PROPAGATION_DEPTH; std::priority_queue, std::greater> layersToAdd; @@ -707,10 +698,12 @@ void LPFormulator::createLPRelaxationAfter( continue; else { - if ( layerIndicesToParameters.empty() ) + if ( coeffs.empty() ) addLayerToModel( gurobi, currentLayer, true ); else { + Map> layerIndicesToParameters = + currentLayer->getParametersForLayers( layers, coeffs ); const Vector ¤tLayerCoeffs = layerIndicesToParameters[currentLayerIndex]; addLayerToParameterisedModel( gurobi, currentLayer, true, currentLayerCoeffs ); @@ -725,8 +718,6 @@ void LPFormulator::createLPRelaxationAfter( } } } - addPolyognalTighteningsToLpRelaxation( - gurobi, layers, firstLayer, layersToAdd.top(), polygonalTightenings ); } void LPFormulator::addLayerToModel( GurobiWrapper &gurobi, @@ -1347,9 +1338,11 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, } double ub = - std::min( Layer::linearUpperBound( sourceLbs, sourceUbs, index ), layer->getUb( i ) ); + std::min( DeepPolySoftmaxElement::linearUpperBound( sourceLbs, sourceUbs, index ), + layer->getUb( i ) ); double lb = - std::max( Layer::linearLowerBound( sourceLbs, sourceUbs, index ), layer->getLb( i ) ); + std::max( DeepPolySoftmaxElement::linearLowerBound( sourceLbs, sourceUbs, index ), + layer->getLb( i ) ); targetLbs[index] = lb; targetUbs[index] = ub; @@ -1381,13 +1374,14 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, { terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = Layer::LSELowerBound( sourceMids, sourceLbs, sourceUbs, index ); + bias = DeepPolySoftmaxElement::LSELowerBound( + sourceMids, sourceLbs, sourceUbs, index ); for ( const auto &source : sources ) { const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); unsigned sourceNeuron = source._neuron; unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dldj = Layer::dLSELowerBound( + double dldj = DeepPolySoftmaxElement::dLSELowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); @@ -1400,13 +1394,14 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, { terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, index ); + bias = DeepPolySoftmaxElement::LSELowerBound2( + sourceMids, sourceLbs, sourceUbs, index ); for ( const auto &source : sources ) { const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); unsigned sourceNeuron = source._neuron; unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dldj = Layer::dLSELowerBound2( + double dldj = 
DeepPolySoftmaxElement::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); @@ -1418,14 +1413,15 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, index ); + bias = DeepPolySoftmaxElement::LSEUpperBound( + sourceMids, targetLbs, targetUbs, index ); inputIndex = 0; for ( const auto &source : sources ) { const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); unsigned sourceNeuron = source._neuron; unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dudj = Layer::dLSEUpperbound( + double dudj = DeepPolySoftmaxElement::dLSEUpperbound( sourceMids, targetLbs, targetUbs, index, inputIndex ); terms.append( GurobiWrapper::Term( -dudj, Stringf( "x%u", sourceVariable ) ) ); bias -= dudj * sourceMids[inputIndex]; @@ -1437,15 +1433,16 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, { terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = Layer::ERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); + bias = + DeepPolySoftmaxElement::ERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); unsigned inputIndex = 0; for ( const auto &source : sources ) { const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); unsigned sourceNeuron = source._neuron; unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dldj = - Layer::dERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + double dldj = DeepPolySoftmaxElement::dERLowerBound( + sourceMids, sourceLbs, sourceUbs, index, inputIndex ); terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); bias -= dldj * sourceMids[inputIndex]; ++inputIndex; @@ -1454,15 +1451,16 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = Layer::ERUpperBound( sourceMids, targetLbs, targetUbs, index ); + bias = + DeepPolySoftmaxElement::ERUpperBound( sourceMids, targetLbs, targetUbs, index ); inputIndex = 0; for ( const auto &source : sources ) { const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); unsigned sourceNeuron = source._neuron; unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dudj = - Layer::dERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex ); + double dudj = DeepPolySoftmaxElement::dERUpperBound( + sourceMids, targetLbs, targetUbs, index, inputIndex ); terms.append( GurobiWrapper::Term( -dudj, Stringf( "x%u", sourceVariable ) ) ); bias -= dudj * sourceMids[inputIndex]; ++inputIndex; @@ -1485,20 +1483,21 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, List sources = layer->getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - Vector sourceLbs; Vector sourceUbs; Vector sourceValues; Vector sourceNeurons; + Vector sourceLayers; bool allConstant = true; for ( const auto &sourceIndex : sources ) { + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); String sourceName = 
Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeuron ) ); + sourceLayers.append( sourceLayer ); sourceNeurons.append( sourceNeuron ); sourceLbs.append( sourceLb ); sourceUbs.append( sourceUb ); @@ -1547,10 +1546,10 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( -sourceLbs[1], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); + Stringf( "x%u", sourceLayers[0]->neuronToVariable( sourceNeurons[0] ) ) ) ); terms.append( GurobiWrapper::Term( -sourceLbs[0], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); + Stringf( "x%u", sourceLayers[1]->neuronToVariable( sourceNeurons[1] ) ) ) ); gurobi.addGeqConstraint( terms, -sourceLbs[0] * sourceLbs[1] ); // Upper bound: out <= u_y * x + l_x * y - l_x * u_y @@ -1558,10 +1557,10 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( -sourceUbs[1], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); + Stringf( "x%u", sourceLayers[0]->neuronToVariable( sourceNeurons[0] ) ) ) ); terms.append( GurobiWrapper::Term( -sourceLbs[0], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); + Stringf( "x%u", sourceLayers[1]->neuronToVariable( sourceNeurons[1] ) ) ) ); gurobi.addLeqConstraint( terms, -sourceLbs[0] * sourceUbs[1] ); } } @@ -2050,20 +2049,21 @@ void LPFormulator::addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &g List sources = layer->getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - Vector sourceLbs; Vector sourceUbs; Vector sourceValues; Vector sourceNeurons; + Vector sourceLayers; bool allConstant = true; for ( const auto &sourceIndex : sources ) { + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); String sourceName = Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeuron ) ); + sourceLayers.append( sourceLayer ); sourceNeurons.append( sourceNeuron ); sourceLbs.append( sourceLb ); sourceUbs.append( sourceUb ); @@ -2115,17 +2115,17 @@ void LPFormulator::addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &g // Upper bound: out <= a_u * x + b_u * y + c_u, where // a_u = alpha2 * u_y + ( 1 - alpha2 ) * l_y // b_u = alpha2 * l_x + ( 1 - alpha2 ) * u_x - // c_u = -alpha2 * -l_x * u_y - ( 1 - alpha2 ) * u_x * l_y + // c_u = -alpha2 * l_x * u_y - ( 1 - alpha2 ) * u_x * l_y List terms; terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( -coeffs[0] * sourceLbs[1] - ( 1 - coeffs[0] ) * sourceUbs[1], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); + Stringf( "x%u", sourceLayers[0]->neuronToVariable( sourceNeurons[0] ) ) ) ); terms.append( GurobiWrapper::Term( -coeffs[0] * sourceLbs[0] - ( 1 - coeffs[0] ) * sourceUbs[0], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); + Stringf( "x%u", sourceLayers[1]->neuronToVariable( sourceNeurons[1] ) ) ) ); gurobi.addGeqConstraint( terms, -coeffs[0] * sourceLbs[0] * sourceLbs[1] - ( 1 - coeffs[0] ) * sourceUbs[0] * sourceUbs[1] ); @@ -2134,10 +2134,10 @@ void 
LPFormulator::addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &g terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( -coeffs[1] * sourceUbs[1] - ( 1 - coeffs[1] ) * sourceLbs[1], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); + Stringf( "x%u", sourceLayers[0]->neuronToVariable( sourceNeurons[0] ) ) ) ); terms.append( GurobiWrapper::Term( -coeffs[1] * sourceLbs[0] - ( 1 - coeffs[1] ) * sourceUbs[0], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); + Stringf( "x%u", sourceLayers[1]->neuronToVariable( sourceNeurons[1] ) ) ) ); gurobi.addLeqConstraint( terms, -coeffs[1] * sourceLbs[0] * sourceUbs[1] - ( 1 - coeffs[1] ) * sourceUbs[0] * sourceLbs[1] ); @@ -2145,69 +2145,6 @@ void LPFormulator::addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &g } } -void LPFormulator::addPolyognalTighteningsToLpRelaxation( - GurobiWrapper &gurobi, - const Map &layers, - unsigned firstLayer, - unsigned lastLayer, - const Vector &polygonalTightenings ) -{ - List terms; - for ( const auto &tightening : polygonalTightenings ) - { - Map neuronToCoefficient = tightening._neuronToCoefficient; - PolygonalTightening::PolygonalBoundType type = tightening._type; - double value = tightening._value; - - bool outOfBounds = false; - for ( const auto &pair : neuronToCoefficient ) - { - unsigned currentLayerIndex = pair.first._layer; - if ( currentLayerIndex < firstLayer || currentLayerIndex > lastLayer ) - { - outOfBounds = true; - } - } - if ( outOfBounds ) - { - continue; - } - - terms.clear(); - for ( const auto &pair : neuronToCoefficient ) - { - unsigned currentLayerIndex = pair.first._layer; - unsigned i = pair.first._neuron; - double coeff = pair.second; - Layer *layer = layers[currentLayerIndex]; - - if ( !layer->neuronEliminated( i ) ) - { - unsigned variable = layer->neuronToVariable( i ); - String variableName = Stringf( "x%u", variable ); - if ( !gurobi.containsVariable( variableName ) ) - { - gurobi.addVariable( variableName, layer->getLb( i ), layer->getUb( i ) ); - } - terms.append( GurobiWrapper::Term( coeff, Stringf( "x%u", variable ) ) ); - } - else - { - value -= coeff * layer->getEliminatedNeuronValue( i ); - } - } - - if ( type == PolygonalTightening::UB ) - { - gurobi.addLeqConstraint( terms, value ); - } - else - { - gurobi.addGeqConstraint( terms, value ); - } - } -} - void LPFormulator::setCutoff( double cutoff ) { _cutoffInUse = true; diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index 43f24e341b..d4ae7af8a2 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -77,15 +77,7 @@ void Layer::allocateMemory() if ( Options::get()->getSymbolicBoundTighteningType() == SymbolicBoundTighteningType::SYMBOLIC_BOUND_TIGHTENING || Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX || - Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP || - Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM || - Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT || - Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS ) + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX ) { _symbolicLb = new double[_size * _inputLayerSize]; _symbolicUb = new 
double[_size * _inputLayerSize]; @@ -936,23 +928,23 @@ void Layer::computeIntervalArithmeticBoundsForSign() double lb = sourceLayer->getLb( sourceIndex._neuron ); double ub = sourceLayer->getUb( sourceIndex._neuron ); - double newLb; - double newUb; + double new_lb; + double new_ub; if ( !FloatUtils::isNegative( lb ) ) { - newLb = 1; - newUb = 1; + new_lb = 1; + new_ub = 1; } else if ( FloatUtils::isNegative( ub ) ) { - newLb = -1; - newUb = -1; + new_lb = -1; + new_ub = -1; } else { - newLb = -1; - newUb = 1; + new_lb = -1; + new_ub = 1; } /* @@ -960,16 +952,16 @@ void Layer::computeIntervalArithmeticBoundsForSign() variable. If they are tigheter than what was previously known, store them. */ - if ( _lb[i] < newLb ) + if ( _lb[i] < new_lb ) { - _lb[i] = newLb; + _lb[i] = new_lb; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); } - if ( _ub[i] > newUb ) + if ( _ub[i] > new_ub ) { - _ub[i] = newUb; + _ub[i] = new_ub; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); } @@ -1233,8 +1225,8 @@ void Layer::computeIntervalArithmeticBoundsForSoftmax() } } - double lb = linearLowerBound( sourceLbs, sourceUbs, index ); - double ub = linearUpperBound( sourceLbs, sourceUbs, index ); + double lb = softmaxLinearLowerBound( sourceLbs, sourceUbs, index ); + double ub = softmaxLinearUpperBound( sourceLbs, sourceUbs, index ); if ( _lb[i] < lb ) { _lb[i] = lb; @@ -2712,7 +2704,6 @@ void Layer::computeSymbolicBoundsForSoftmax() Vector sourceMids; Vector targetLbs; Vector targetUbs; - unsigned len = 0; for ( const auto &sourceIndex : sources ) { unsigned sourceNeuron = sourceIndex._neuron; @@ -2724,8 +2715,6 @@ void Layer::computeSymbolicBoundsForSoftmax() sourceMids.append( ( sourceLb + sourceUb ) / 2 ); targetLbs.append( _lb[i] ); targetUbs.append( _ub[i] ); - - ++len; } // Find the index of i in the softmax @@ -2741,8 +2730,8 @@ void Layer::computeSymbolicBoundsForSoftmax() } } - double lb = linearLowerBound( sourceLbs, sourceUbs, index ); - double ub = linearUpperBound( sourceLbs, sourceUbs, index ); + double lb = softmaxLinearLowerBound( sourceLbs, sourceUbs, index ); + double ub = softmaxLinearUpperBound( sourceLbs, sourceUbs, index ); if ( _lb[i] < lb ) { _lb[i] = lb; @@ -2764,8 +2753,8 @@ void Layer::computeSymbolicBoundsForSoftmax() _symbolicUpperBias[i] = _ub[i]; for ( const auto &sourceIndex : sources ) { - symbolicLb[len * sourceIndex._neuron + i] = 0; - symbolicUb[len * sourceIndex._neuron + i] = 0; + symbolicLb[_size * sourceIndex._neuron + i] = 0; + symbolicUb[_size * sourceIndex._neuron + i] = 0; } } else @@ -2783,12 +2772,12 @@ void Layer::computeSymbolicBoundsForSoftmax() if ( !useLSE2 ) { _symbolicLowerBias[i] = - LSELowerBound( sourceMids, sourceLbs, sourceUbs, index ); + softmaxLSELowerBound( sourceMids, sourceLbs, sourceUbs, index ); for ( const auto &sourceIndex : sources ) { - double dldj = - dLSELowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - symbolicLb[len * sourceIndex._neuron + i] = dldj; + double dldj = softmaxdLSELowerBound( + sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + symbolicLb[_size * sourceIndex._neuron + i] = dldj; _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; ++inputIndex; } @@ -2796,48 +2785,51 @@ void Layer::computeSymbolicBoundsForSoftmax() else { _symbolicLowerBias[i] = - LSELowerBound2( sourceMids, sourceLbs, sourceUbs, index ); + softmaxLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index ); for ( const auto &sourceIndex : sources 
) { - double dldj = - dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - symbolicLb[len * sourceIndex._neuron + i] = dldj; + double dldj = softmaxdLSELowerBound2( + sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + symbolicLb[_size * sourceIndex._neuron + i] = dldj; _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; ++inputIndex; } } - _symbolicUpperBias[i] = LSEUpperBound( sourceMids, targetLbs, targetUbs, index ); + _symbolicUpperBias[i] = + softmaxLSEUpperBound( sourceMids, targetLbs, targetUbs, index ); inputIndex = 0; for ( const auto &sourceIndex : sources ) { - double dudj = - dLSEUpperbound( sourceMids, targetLbs, targetUbs, index, inputIndex ); - symbolicUb[len * sourceIndex._neuron + i] = dudj; + double dudj = softmaxdLSEUpperbound( + sourceMids, targetLbs, targetUbs, index, inputIndex ); + symbolicUb[_size * sourceIndex._neuron + i] = dudj; _symbolicUpperBias[i] -= dudj * sourceMids[inputIndex]; ++inputIndex; } } else if ( boundType == SoftmaxBoundType::EXPONENTIAL_RECIPROCAL_DECOMPOSITION ) { - _symbolicLowerBias[i] = ERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); + _symbolicLowerBias[i] = + softmaxERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); unsigned inputIndex = 0; for ( const auto &sourceIndex : sources ) { double dldj = - dERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - symbolicLb[len * sourceIndex._neuron + i] = dldj; + softmaxdERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + symbolicLb[_size * sourceIndex._neuron + i] = dldj; _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; ++inputIndex; } - _symbolicUpperBias[i] = ERUpperBound( sourceMids, targetLbs, targetUbs, index ); + _symbolicUpperBias[i] = + softmaxERUpperBound( sourceMids, targetLbs, targetUbs, index ); inputIndex = 0; for ( const auto &sourceIndex : sources ) { double dudj = - dERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex ); - symbolicUb[len * sourceIndex._neuron + i] = dudj; + softmaxdERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex ); + symbolicUb[_size * sourceIndex._neuron + i] = dudj; _symbolicUpperBias[i] -= dudj * sourceMids[inputIndex]; ++inputIndex; } @@ -3039,27 +3031,26 @@ void Layer::computeSymbolicBoundsForBilinear() List sources = getActivationSources( i ); ASSERT( sources.size() == 2 ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - - unsigned sourceLayerSize = sourceLayer->getSize(); - const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); - const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); - Vector sourceLbs; Vector sourceUbs; Vector sourceValues; + Vector sourceNeurons; + Vector sourceLayerSizes; + Vector sourceLayers; bool allConstant = true; - unsigned indexA = 0; - unsigned indexB = 0; - unsigned counter = 0; for ( const auto &sourceIndex : sources ) { + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); + unsigned sourceLayerSize = sourceLayer->getSize(); + sourceLayers.append( sourceLayer ); + sourceNeurons.append( sourceNeuron ); sourceLbs.append( sourceLb ); sourceUbs.append( sourceUb ); + sourceLayerSizes.append( sourceLayerSize ); if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) { @@ -3070,16 +3061,6 @@ void Layer::computeSymbolicBoundsForBilinear() double sourceValue = sourceLayer->getEliminatedNeuronValue( 
sourceNeuron ); sourceValues.append( sourceValue ); } - - if ( counter == 0 ) - { - indexA = sourceIndex._neuron; - } - else - { - indexB = sourceIndex._neuron; - } - ++counter; } if ( allConstant ) @@ -3109,47 +3090,75 @@ void Layer::computeSymbolicBoundsForBilinear() // Symbolic upper bound: // out <= alpha * x + beta * y + gamma // where alpha = ub_y, beta = lb_x, gamma = -lb_x * ub_y + double aLower = sourceLbs[1]; + double aUpper = sourceUbs[1]; + double bLower = sourceLbs[0]; + double bUpper = sourceLbs[0]; + _symbolicLowerBias[i] = -sourceLbs[0] * sourceLbs[1]; + _symbolicUpperBias[i] = -sourceLbs[0] * sourceUbs[1]; + for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - if ( sourceLbs[1] >= 0 ) + if ( aLower >= 0 ) { _symbolicLb[j * _size + i] += - sourceLbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA]; + aLower * ( sourceLayers[0] + ->getSymbolicLb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicLowerBias[i] += aLower * ( sourceLayers[0]->getSymbolicLowerBias() )[0]; } else { _symbolicLb[j * _size + i] += - sourceLbs[1] * sourceSymbolicUb[j * sourceLayerSize + indexA]; + aLower * ( sourceLayers[0] + ->getSymbolicUb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicLowerBias[i] += aLower * ( sourceLayers[0]->getSymbolicUpperBias() )[0]; } - if ( sourceUbs[1] >= 0 ) + if ( aUpper >= 0 ) { _symbolicUb[j * _size + i] += - sourceUbs[1] * sourceSymbolicUb[j * sourceLayerSize + indexA]; + aUpper * ( sourceLayers[0] + ->getSymbolicUb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicUpperBias[i] += aUpper * ( sourceLayers[0]->getSymbolicUpperBias() )[0]; } else { _symbolicUb[j * _size + i] += - sourceUbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA]; + aUpper * ( sourceLayers[0] + ->getSymbolicLb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicUpperBias[i] += aUpper * ( sourceLayers[0]->getSymbolicLowerBias() )[0]; } - if ( sourceLbs[0] >= 0 ) + if ( bLower >= 0 ) { _symbolicLb[j * _size + i] += - sourceLbs[0] * sourceSymbolicLb[j * sourceLayerSize + indexB]; - _symbolicUb[j * _size + i] += - sourceLbs[0] * sourceSymbolicUb[j * sourceLayerSize + indexB]; + bLower * ( sourceLayers[1] + ->getSymbolicLb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bLower * ( sourceLayers[1]->getSymbolicLowerBias() )[1]; } else { _symbolicLb[j * _size + i] += - sourceLbs[0] * sourceSymbolicUb[j * sourceLayerSize + indexB]; + bLower * ( sourceLayers[1] + ->getSymbolicUb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bLower * ( sourceLayers[1]->getSymbolicUpperBias() )[1]; + } + + if ( bUpper >= 0 ) + { _symbolicUb[j * _size + i] += - sourceLbs[0] * sourceSymbolicLb[j * sourceLayerSize + indexB]; + bUpper * ( sourceLayers[1] + ->getSymbolicUb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bUpper * ( sourceLayers[1]->getSymbolicUpperBias() )[1]; + } + else + { + _symbolicUb[j * _size + i] += + bUpper * ( sourceLayers[1] + ->getSymbolicLb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bUpper * ( sourceLayers[1]->getSymbolicLowerBias() )[1]; } } - _symbolicLowerBias[i] = -sourceLbs[0] * sourceLbs[1]; - _symbolicUpperBias[i] = -sourceLbs[0] * sourceUbs[1]; double lb = FloatUtils::infinity(); double ub = FloatUtils::negativeInfinity(); @@ -4096,27 +4105,26 @@ void Layer::computeParameterisedSymbolicBoundsForBilinear( const Vector List sources = getActivationSources( i ); ASSERT( sources.size() == 2 ); - const Layer *sourceLayer = 
_layerOwner->getLayer( sources.begin()->_layer ); - - unsigned sourceLayerSize = sourceLayer->getSize(); - const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); - const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); - Vector sourceLbs; Vector sourceUbs; Vector sourceValues; + Vector sourceNeurons; + Vector sourceLayerSizes; + Vector sourceLayers; bool allConstant = true; - unsigned indexA = 0; - unsigned indexB = 0; - unsigned counter = 0; for ( const auto &sourceIndex : sources ) { + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); + unsigned sourceLayerSize = sourceLayer->getSize(); + sourceLayers.append( sourceLayer ); + sourceNeurons.append( sourceNeuron ); sourceLbs.append( sourceLb ); sourceUbs.append( sourceUb ); + sourceLayerSizes.append( sourceLayerSize ); if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) { @@ -4127,16 +4135,6 @@ void Layer::computeParameterisedSymbolicBoundsForBilinear( const Vector double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); sourceValues.append( sourceValue ); } - - if ( counter == 0 ) - { - indexA = sourceIndex._neuron; - } - else - { - indexB = sourceIndex._neuron; - } - ++counter; } if ( allConstant ) @@ -4175,58 +4173,74 @@ void Layer::computeParameterisedSymbolicBoundsForBilinear( const Vector double bLower = coeffs[0] * sourceLbs[0] + ( 1 - coeffs[0] ) * sourceUbs[0]; double bUpper = coeffs[1] * sourceLbs[0] + ( 1 - coeffs[1] ) * sourceUbs[0]; + _symbolicLowerBias[i] = -coeffs[0] * sourceLbs[0] * sourceLbs[1] - + ( 1 - coeffs[0] ) * sourceUbs[0] * sourceUbs[1]; + _symbolicUpperBias[i] = -coeffs[1] * sourceLbs[0] * sourceUbs[1] - + ( 1 - coeffs[1] ) * sourceUbs[0] * sourceLbs[1]; + for ( unsigned j = 0; j < _inputLayerSize; ++j ) { if ( aLower >= 0 ) { _symbolicLb[j * _size + i] += - aLower * sourceSymbolicLb[j * sourceLayerSize + indexA]; + aLower * ( sourceLayers[0] + ->getSymbolicLb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicLowerBias[i] += aLower * ( sourceLayers[0]->getSymbolicLowerBias() )[0]; } else { _symbolicLb[j * _size + i] += - aLower * sourceSymbolicUb[j * sourceLayerSize + indexA]; + aLower * ( sourceLayers[0] + ->getSymbolicUb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicLowerBias[i] += aLower * ( sourceLayers[0]->getSymbolicUpperBias() )[0]; } if ( aUpper >= 0 ) { _symbolicUb[j * _size + i] += - aUpper * sourceSymbolicUb[j * sourceLayerSize + indexA]; + aUpper * ( sourceLayers[0] + ->getSymbolicUb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicUpperBias[i] += aUpper * ( sourceLayers[0]->getSymbolicUpperBias() )[0]; } else { _symbolicUb[j * _size + i] += - aUpper * sourceSymbolicLb[j * sourceLayerSize + indexA]; + aUpper * ( sourceLayers[0] + ->getSymbolicLb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicUpperBias[i] += aUpper * ( sourceLayers[0]->getSymbolicLowerBias() )[0]; } if ( bLower >= 0 ) { _symbolicLb[j * _size + i] += - bLower * sourceSymbolicLb[j * sourceLayerSize + indexB]; + bLower * ( sourceLayers[1] + ->getSymbolicLb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bLower * ( sourceLayers[1]->getSymbolicLowerBias() )[1]; } else { _symbolicLb[j * _size + i] += - bLower * sourceSymbolicUb[j * sourceLayerSize + indexB]; + bLower * ( sourceLayers[1] + ->getSymbolicUb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + 
_symbolicUpperBias[i] += bLower * ( sourceLayers[1]->getSymbolicUpperBias() )[1];
             }
 
-            if ( bLower >= 0 )
+            if ( bUpper >= 0 )
             {
                 _symbolicUb[j * _size + i] +=
-                    bUpper * sourceSymbolicUb[j * sourceLayerSize + indexB];
+                    bUpper * ( sourceLayers[1]
+                                   ->getSymbolicUb() )[j * sourceLayerSizes[1] + sourceNeurons[1]];
+                _symbolicUpperBias[i] += bUpper * ( sourceLayers[1]->getSymbolicUpperBias() )[1];
             }
             else
             {
                 _symbolicUb[j * _size + i] +=
-                    bUpper * sourceSymbolicLb[j * sourceLayerSize + indexB];
+                    bUpper * ( sourceLayers[1]
+                                   ->getSymbolicLb() )[j * sourceLayerSizes[1] + sourceNeurons[1]];
+                _symbolicUpperBias[i] += bUpper * ( sourceLayers[1]->getSymbolicLowerBias() )[1];
             }
         }
 
-        _symbolicLowerBias[i] = -coeffs[0] * sourceLbs[0] * sourceLbs[1] -
-                                ( 1 - coeffs[0] ) * sourceUbs[0] * sourceUbs[1];
-        _symbolicUpperBias[i] = -coeffs[1] * sourceLbs[0] * sourceUbs[1] -
-                                ( 1 - coeffs[1] ) * sourceUbs[0] * sourceLbs[1];
-
         double lb = FloatUtils::infinity();
         double ub = FloatUtils::negativeInfinity();
         List<double> values = { sourceLbs[0] * sourceLbs[1],
@@ -4308,10 +4322,218 @@ void Layer::computeParameterisedSymbolicBoundsForBilinear( const Vector<double>
     }
 }
 
-double Layer::LSELowerBound( const Vector<double> &inputs,
-                             const Vector<double> &inputLbs,
-                             const Vector<double> &inputUbs,
-                             unsigned i )
+const Vector<double> Layer::OptimalParameterisedSymbolicBoundTightening()
+{
+    const Map<unsigned, Layer *> &layers = _layerOwner->getLayerIndexToLayer();
+
+    // Search over coeffs in [0, 1]^number_of_parameters with projected gradient descent.
+    unsigned max_iterations =
+        GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS;
+    double step_size = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE;
+    double epsilon = GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS;
+    double weight_decay = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_WEIGHT_DECAY;
+    double lr = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE;
+    unsigned dimension = getNumberOfParameters( layers );
+    bool maximize = false;
+    double sign = ( maximize ? 1 : -1 );
+
+    Vector<double> lower_bounds( dimension, 0 );
+    Vector<double> upper_bounds( dimension, 1 );
+
+    // Initialize the guess uniformly at random.
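+    // A sketch of the loop below: each iteration estimates the gradient of
+    // EstimateVolume by forward finite differences,
+    // ( cost( guess + step_size * e_j ) - cost( guess ) ) / step_size, adds a
+    // weight-decay term, steps by lr times the gradient ( sign is -1 here,
+    // since maximize is false ), and projects the result back onto the
+    // [0, 1] box with the std::min/std::max clamps.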
+    // Initialize initial guess uniformly.
+    Vector<double> guess( dimension );
+    std::mt19937_64 rng( GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED );
+    std::uniform_real_distribution<double> dis( 0, 1 );
+    for ( unsigned j = 0; j < dimension; ++j )
+    {
+        double lb = lower_bounds[j];
+        double ub = upper_bounds[j];
+        guess[j] = lb + dis( rng ) * ( ub - lb );
+    }
+
+    Vector<Vector<double>> candidates( dimension );
+    Vector<double> gradient( dimension );
+
+    for ( unsigned i = 0; i < max_iterations; ++i )
+    {
+        double current_cost = EstimateVolume( guess );
+        for ( unsigned j = 0; j < dimension; ++j )
+        {
+            candidates[j] = Vector<double>( guess );
+            candidates[j][j] += step_size;
+
+            if ( candidates[j][j] > upper_bounds[j] || candidates[j][j] < lower_bounds[j] )
+            {
+                gradient[j] = 0;
+                continue;
+            }
+
+            double cost = EstimateVolume( candidates[j] );
+            gradient[j] = ( cost - current_cost ) / step_size + weight_decay * guess[j];
+        }
+
+        bool gradient_is_zero = true;
+        for ( unsigned j = 0; j < dimension; ++j )
+        {
+            if ( FloatUtils::abs( gradient[j] ) > epsilon )
+            {
+                gradient_is_zero = false;
+            }
+        }
+        if ( gradient_is_zero )
+        {
+            break;
+        }
+
+        for ( unsigned j = 0; j < dimension; ++j )
+        {
+            guess[j] += sign * lr * gradient[j];
+
+            guess[j] = std::min( guess[j], upper_bounds[j] );
+            guess[j] = std::max( guess[j], lower_bounds[j] );
+        }
+    }
+
+    const Vector<double> optimal_coeffs( guess );
+    return optimal_coeffs;
+}
+
+double Layer::EstimateVolume( const Vector<double> &coeffs )
+{
+    // First, run parameterised symbolic bound propagation.
+    const Map<unsigned, Layer *> &layers = _layerOwner->getLayerIndexToLayer();
+    Map<unsigned, Vector<double>> layerIndicesToParameters =
+        getParametersForLayers( layers, coeffs );
+    for ( unsigned i = 0; i < layers.size(); ++i )
+    {
+        ASSERT( layers.exists( i ) );
+        const Vector<double> &currentLayerCoeffs = layerIndicesToParameters[i];
+        layers[i]->computeParameterisedSymbolicBounds( currentLayerCoeffs );
+    }
+
+    std::mt19937_64 rng( GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED );
+    double log_box_volume = 0;
+    double sigmoid_sum = 0;
+
+    unsigned inputLayerIndex = 0;
+    unsigned outputLayerIndex = layers.size() - 1;
+    Layer *inputLayer = layers[inputLayerIndex];
+    Layer *outputLayer = layers[outputLayerIndex];
+
+    // Calculate volume of input variables' bounding box.
+    for ( unsigned index = 0; index < inputLayer->getSize(); ++index )
+    {
+        if ( inputLayer->neuronEliminated( index ) )
+            continue;
+
+        double lb = inputLayer->getLb( index );
+        double ub = inputLayer->getUb( index );
+
+        if ( lb == ub )
+            continue;
+
+        log_box_volume += std::log( ub - lb );
+    }
+
+    for ( unsigned i = 0; i < GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; ++i )
+    {
+        // Sample input point from known bounds.
+        Map<unsigned, double> point;
+        for ( unsigned j = 0; j < inputLayer->getSize(); ++j )
+        {
+            if ( inputLayer->neuronEliminated( j ) )
+            {
+                point.insert( j, 0 );
+            }
+            else
+            {
+                double lb = inputLayer->getLb( j );
+                double ub = inputLayer->getUb( j );
+                std::uniform_real_distribution<> dis( lb, ub );
+                point.insert( j, dis( rng ) );
+            }
+        }
+
+        // Calculate sigmoid of maximum margin from output symbolic bounds.
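+        // The value accumulated here makes the return value below equal to
+        //     exp( log_box_volume + log( sigmoid_sum ) ) / VOLUME_ESTIMATION_ITERATIONS
+        //         = box_volume * ( sigmoid_sum / N ),
+        // i.e. a Monte-Carlo average of sigmoid( max_margin ) over the N
+        // sampled points, scaled by the volume of the input bounding box.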
+ double max_margin = 0; + for ( unsigned j = 0; j < outputLayer->getSize(); ++j ) + { + if ( outputLayer->neuronEliminated( j ) ) + continue; + + double margin = outputLayer->calculateDifferenceFromSymbolic( point, j ); + max_margin = std::max( max_margin, margin ); + } + sigmoid_sum += SigmoidConstraint::sigmoid( max_margin ); + } + + return std::exp( log_box_volume + std::log( sigmoid_sum ) ) / + GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; +} + +double Layer::calculateDifferenceFromSymbolic( Map &point, unsigned i ) const +{ + double lowerSum = _symbolicLowerBias[i]; + double upperSum = _symbolicUpperBias[i]; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + lowerSum += _symbolicLb[j * _size + i] * point[j]; + upperSum += _symbolicUb[j * _size + i] * point[j]; + } + + return std::max( _ub[i] - upperSum, lowerSum - _lb[i] ); +} + + +unsigned Layer::getNumberOfParametersPerType( Type t ) const +{ + if ( t == Layer::RELU || t == Layer::LEAKY_RELU ) + return 1; + + if ( t == Layer::SIGN || t == Layer::BILINEAR ) + return 2; + + return 0; +} + +unsigned Layer::getNumberOfParameters( const Map &layers ) const +{ + unsigned index = 0; + for ( auto pair : layers ) + { + unsigned layerIndex = pair.first; + Layer *layer = layers[layerIndex]; + index += getNumberOfParametersPerType( layer->getLayerType() ); + } + return index; +} + +Map> Layer::getParametersForLayers( const Map &layers, + const Vector &coeffs ) const +{ + unsigned index = 0; + Map> layerIndicesToParameters; + for ( auto pair : layers ) + { + unsigned layerIndex = pair.first; + Layer *layer = layers[layerIndex]; + unsigned n_coeffs = getNumberOfParametersPerType( layer->getLayerType() ); + Vector current_coeffs( n_coeffs ); + for ( unsigned i = 0; i < n_coeffs; i++ ) + { + current_coeffs[i] = coeffs[index + i]; + } + layerIndicesToParameters.insert( layerIndex, current_coeffs ); + index += n_coeffs; + } + return layerIndicesToParameters; +} + +double Layer::softmaxLSELowerBound( const Vector &inputs, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) { double sum = 0; for ( unsigned j = 0; j < inputs.size(); ++j ) @@ -4326,15 +4548,15 @@ double Layer::LSELowerBound( const Vector &inputs, return std::exp( inputs[i] ) / sum; } -double Layer::dLSELowerBound( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) +double Layer::softmaxdLSELowerBound( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) { double val = 0; if ( i == di ) - val += LSELowerBound( inputMids, inputLbs, inputUbs, i ); + val += softmaxLSELowerBound( inputMids, inputLbs, inputUbs, i ); double ldi = inputLbs[di]; double udi = inputUbs[di]; @@ -4356,10 +4578,10 @@ double Layer::dLSELowerBound( const Vector &inputMids, return val; } -double Layer::LSELowerBound2( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) +double Layer::softmaxLSELowerBound2( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) { double max = FloatUtils::negativeInfinity(); unsigned maxInputIndex = 0; @@ -4375,7 +4597,7 @@ double Layer::LSELowerBound2( const Vector &inputMids, } if ( maxInputIndex == i ) - return ERLowerBound( inputMids, inputLbs, inputUbs, i ); + return softmaxERLowerBound( inputMids, inputLbs, inputUbs, i ); else { double sum = 0; @@ -4398,11 +4620,11 @@ double Layer::LSELowerBound2( const Vector &inputMids, } } -double Layer::dLSELowerBound2( const Vector 
&inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) +double Layer::softmaxdLSELowerBound2( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) { double max = FloatUtils::negativeInfinity(); unsigned maxInputIndex = 0; @@ -4418,10 +4640,10 @@ double Layer::dLSELowerBound2( const Vector &inputMids, } if ( maxInputIndex == i ) - return dERLowerBound( inputMids, inputLbs, inputUbs, i, di ); + return softmaxdERLowerBound( inputMids, inputLbs, inputUbs, i, di ); else { - double val = LSELowerBound2( inputMids, inputLbs, inputUbs, i ); + double val = softmaxLSELowerBound2( inputMids, inputLbs, inputUbs, i ); double sum = 0; for ( unsigned j = 0; j < inputMids.size(); ++j ) @@ -4472,10 +4694,10 @@ double Layer::dLSELowerBound2( const Vector &inputMids, } } -double Layer::LSEUpperBound( const Vector &inputs, - const Vector &outputLb, - const Vector &outputUb, - unsigned i ) +double Layer::softmaxLSEUpperBound( const Vector &inputs, + const Vector &outputLb, + const Vector &outputUb, + unsigned i ) { double li = outputLb[i]; double ui = outputUb[i]; @@ -4488,11 +4710,11 @@ double Layer::LSEUpperBound( const Vector &inputs, SoftmaxConstraint::logSumOfExponential( inputTilda ) ); } -double Layer::dLSEUpperbound( const Vector &inputMids, - const Vector &outputLb, - const Vector &outputUb, - unsigned i, - unsigned di ) +double Layer::softmaxdLSEUpperbound( const Vector &inputMids, + const Vector &outputLb, + const Vector &outputUb, + unsigned i, + unsigned di ) { double li = outputLb[i]; double ui = outputUb[i]; @@ -4506,10 +4728,10 @@ double Layer::dLSEUpperbound( const Vector &inputMids, return val * val2; } -double Layer::ERLowerBound( const Vector &inputs, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) +double Layer::softmaxERLowerBound( const Vector &inputs, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) { Vector inputTilda; SoftmaxConstraint::xTilda( inputs, inputs[i], inputTilda ); @@ -4533,13 +4755,13 @@ double Layer::ERLowerBound( const Vector &inputs, return 1 / sum; } -double Layer::dERLowerBound( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) +double Layer::softmaxdERLowerBound( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) { - double val = ERLowerBound( inputMids, inputLbs, inputUbs, i ); + double val = softmaxERLowerBound( inputMids, inputLbs, inputUbs, i ); if ( i != di ) { @@ -4564,10 +4786,10 @@ double Layer::dERLowerBound( const Vector &inputMids, } } -double Layer::ERUpperBound( const Vector &inputs, - const Vector &outputLb, - const Vector &outputUb, - unsigned i ) +double Layer::softmaxERUpperBound( const Vector &inputs, + const Vector &outputLb, + const Vector &outputUb, + unsigned i ) { double li = outputLb[i]; double ui = outputUb[i]; @@ -4578,16 +4800,15 @@ double Layer::ERUpperBound( const Vector &inputs, return ui + li - ui * li * SoftmaxConstraint::sumOfExponential( inputTilda ); } -double Layer::dERUpperBound( const Vector &inputMids, - const Vector &outputLb, - const Vector &outputUb, - unsigned i, - unsigned di ) +double Layer::softmaxdERUpperBound( const Vector &inputMids, + const Vector &outputLb, + const Vector &outputUb, + unsigned i, + unsigned di ) { double li = outputLb[i]; double ui = outputUb[i]; - if ( i == di ) { double val2 = -1; @@ -4599,9 +4820,9 @@ double Layer::dERUpperBound( const Vector &inputMids, 
return -li * ui * std::exp( inputMids[di] - inputMids[i] ); } -double Layer::linearLowerBound( const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) +double Layer::softmaxLinearLowerBound( const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) { Vector uTilda; SoftmaxConstraint::xTilda( inputUbs, inputLbs[i], uTilda ); @@ -4609,9 +4830,9 @@ double Layer::linearLowerBound( const Vector &inputLbs, return 1 / SoftmaxConstraint::sumOfExponential( uTilda ); } -double Layer::linearUpperBound( const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) +double Layer::softmaxLinearUpperBound( const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) { Vector lTilda; SoftmaxConstraint::xTilda( inputLbs, inputUbs[i], lTilda ); From 3b64c7584cd49aa0fed7304a0275a3fec67d9cf0 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Wed, 17 Sep 2025 21:10:29 +0300 Subject: [PATCH 73/81] Undo last commit: Relevant changes will be re-added in a larger commit. --- src/LPFormulator.cpp | 2217 ++++++++++++++++++ src/Layer.cpp | 5162 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 7379 insertions(+) create mode 100644 src/LPFormulator.cpp create mode 100644 src/Layer.cpp diff --git a/src/LPFormulator.cpp b/src/LPFormulator.cpp new file mode 100644 index 0000000000..67f2725155 --- /dev/null +++ b/src/LPFormulator.cpp @@ -0,0 +1,2217 @@ +/********************* */ +/*! \file LPFormulator.cpp + ** \verbatim + ** Top contributors (to current version): + ** Guy Katz + ** This file is part of the Marabou project. + ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS + ** in the top-level source directory) and their institutional affiliations. + ** All rights reserved. See the file COPYING in the top-level source + ** directory for licensing information.\endverbatim + ** + ** [[ Add lengthier description here ]] + + **/ + +#include "LPFormulator.h" + +#include "DeepPolySoftmaxElement.h" +#include "GurobiWrapper.h" +#include "InfeasibleQueryException.h" +#include "Layer.h" +#include "MStringf.h" +#include "NLRError.h" +#include "Options.h" +#include "TimeUtils.h" +#include "Vector.h" + +namespace NLR { + +LPFormulator::LPFormulator( LayerOwner *layerOwner ) + : _layerOwner( layerOwner ) + , _cutoffInUse( false ) + , _cutoffValue( 0 ) +{ +} + +LPFormulator::~LPFormulator() +{ +} + +double LPFormulator::solveLPRelaxation( GurobiWrapper &gurobi, + const Map &layers, + MinOrMax minOrMax, + String variableName, + unsigned lastLayer ) +{ + gurobi.resetModel(); + createLPRelaxation( layers, gurobi, lastLayer ); + return optimizeWithGurobi( gurobi, minOrMax, variableName, _cutoffValue ); +} + +double LPFormulator::optimizeWithGurobi( GurobiWrapper &gurobi, + MinOrMax minOrMax, + String variableName, + double cutoffValue, + std::atomic_bool *infeasible ) +{ + List terms; + terms.append( GurobiWrapper::Term( 1, variableName ) ); + + if ( minOrMax == MAX ) + gurobi.setObjective( terms ); + else + gurobi.setCost( terms ); + + gurobi.setTimeLimit( FloatUtils::infinity() ); + + gurobi.solve(); + + if ( gurobi.infeasible() ) + { + if ( infeasible ) + { + *infeasible = true; + return FloatUtils::infinity(); + } + else + throw InfeasibleQueryException(); + } + + if ( gurobi.cutoffOccurred() ) + return cutoffValue; + + if ( gurobi.optimal() ) + { + Map dontCare; + double result = 0; + gurobi.extractSolution( dontCare, result ); + return result; + } + else if ( gurobi.timeout() ) + { + return gurobi.getObjectiveBound(); + } + + throw NLRError( 
NLRError::UNEXPECTED_RETURN_STATUS_FROM_GUROBI ); +} + +void LPFormulator::optimizeBoundsWithIncrementalLpRelaxation( const Map &layers ) +{ + GurobiWrapper gurobi; + + List terms; + Map dontCare; + double lb = 0; + double ub = 0; + double currentLb = 0; + double currentUb = 0; + + unsigned tighterBoundCounter = 0; + unsigned signChanges = 0; + unsigned cutoffs = 0; + + struct timespec gurobiStart; + (void)gurobiStart; + struct timespec gurobiEnd; + (void)gurobiEnd; + + gurobiStart = TimeUtils::sampleMicro(); + + for ( unsigned i = 0; i < _layerOwner->getNumberOfLayers(); ++i ) + { + /* + Go over the layers, one by one. Each time encode the layer, + and then issue queries on each of its variables + */ + ASSERT( layers.exists( i ) ); + Layer *layer = layers[i]; + addLayerToModel( gurobi, layer, false ); + + for ( unsigned j = 0; j < layer->getSize(); ++j ) + { + if ( layer->neuronEliminated( j ) ) + continue; + + currentLb = layer->getLb( j ); + currentUb = layer->getUb( j ); + + if ( _cutoffInUse && ( currentLb >= _cutoffValue || currentUb <= _cutoffValue ) ) + continue; + + unsigned variable = layer->neuronToVariable( j ); + Stringf variableName( "x%u", variable ); + + terms.clear(); + terms.append( GurobiWrapper::Term( 1, variableName ) ); + + // Maximize + gurobi.reset(); + gurobi.setObjective( terms ); + gurobi.solve(); + + if ( gurobi.infeasible() ) + throw InfeasibleQueryException(); + + if ( gurobi.cutoffOccurred() ) + { + ub = _cutoffValue; + } + else if ( gurobi.optimal() ) + { + gurobi.extractSolution( dontCare, ub ); + } + else if ( gurobi.timeout() ) + { + ub = gurobi.getObjectiveBound(); + } + else + { + throw NLRError( NLRError::UNEXPECTED_RETURN_STATUS_FROM_GUROBI ); + } + + // If the bound is tighter, store it + if ( ub < currentUb ) + { + gurobi.setUpperBound( variableName, ub ); + + if ( FloatUtils::isPositive( currentUb ) && !FloatUtils::isPositive( ub ) ) + ++signChanges; + + layer->setUb( j, ub ); + _layerOwner->receiveTighterBound( Tightening( variable, ub, Tightening::UB ) ); + ++tighterBoundCounter; + + if ( _cutoffInUse && ub < _cutoffValue ) + { + ++cutoffs; + continue; + } + } + + // Minimize + gurobi.reset(); + gurobi.setCost( terms ); + gurobi.solve(); + + if ( gurobi.infeasible() ) + throw InfeasibleQueryException(); + + if ( gurobi.cutoffOccurred() ) + { + lb = _cutoffValue; + } + else if ( gurobi.optimal() ) + { + gurobi.extractSolution( dontCare, lb ); + } + else if ( gurobi.timeout() ) + { + lb = gurobi.getObjectiveBound(); + } + else + { + throw NLRError( NLRError::UNEXPECTED_RETURN_STATUS_FROM_GUROBI ); + } + + // If the bound is tighter, store it + if ( lb > currentLb ) + { + gurobi.setLowerBound( variableName, lb ); + + if ( FloatUtils::isNegative( currentLb ) && !FloatUtils::isNegative( lb ) ) + ++signChanges; + + layer->setLb( j, lb ); + _layerOwner->receiveTighterBound( Tightening( variable, lb, Tightening::LB ) ); + ++tighterBoundCounter; + + if ( _cutoffInUse && lb >= _cutoffValue ) + { + ++cutoffs; + continue; + } + } + } + } + + gurobiEnd = TimeUtils::sampleMicro(); + + LPFormulator_LOG( + Stringf( "Number of tighter bounds found by Gurobi: %u. Sign changes: %u. 
Cutoffs: %u\n", + tighterBoundCounter, + signChanges, + cutoffs ) + .ascii() ); + LPFormulator_LOG( Stringf( "Seconds spent Gurobiing: %llu\n", + TimeUtils::timePassed( gurobiStart, gurobiEnd ) / 1000000 ) + .ascii() ); +} + +void LPFormulator::optimizeBoundsWithLpRelaxation( + const Map &layers, + bool backward, + const Map> &layerIndicesToParameters, + const Vector &polygonalTightenings ) +{ + unsigned numberOfWorkers = Options::get()->getInt( Options::NUM_WORKERS ); + + Map solverToIndex; + // Create a queue of free workers + // When a worker is working, it is popped off the queue, when it is done, it + // is added back to the queue. + SolverQueue freeSolvers( numberOfWorkers ); + for ( unsigned i = 0; i < numberOfWorkers; ++i ) + { + GurobiWrapper *gurobi = new GurobiWrapper(); + solverToIndex[gurobi] = i; + enqueueSolver( freeSolvers, gurobi ); + } + + boost::thread *threads = new boost::thread[numberOfWorkers]; + std::mutex mtx; + std::atomic_bool infeasible( false ); + + std::atomic_uint tighterBoundCounter( 0 ); + std::atomic_uint signChanges( 0 ); + std::atomic_uint cutoffs( 0 ); + + struct timespec gurobiStart; + (void)gurobiStart; + struct timespec gurobiEnd; + (void)gurobiEnd; + + gurobiStart = TimeUtils::sampleMicro(); + + unsigned startIndex = backward ? layers.size() - 1 : 0; + unsigned endIndex = backward ? 0 : layers.size(); + for ( unsigned layerIndex = startIndex; layerIndex != endIndex; + backward ? --layerIndex : ++layerIndex ) + { + LPFormulator_LOG( Stringf( "Tightening bound for layer %u...", layerIndex ).ascii() ); + Layer *layer = layers[layerIndex]; + + ThreadArgument argument( layer, + &layers, + std::ref( freeSolvers ), + std::ref( mtx ), + std::ref( infeasible ), + std::ref( tighterBoundCounter ), + std::ref( signChanges ), + std::ref( cutoffs ), + layer->getLayerIndex(), + layerIndex, + threads, + &solverToIndex ); + + // optimize every neuron of layer + optimizeBoundsOfNeuronsWithLpRelaxation( + argument, backward, layerIndicesToParameters, polygonalTightenings ); + LPFormulator_LOG( Stringf( "Tightening bound for layer %u - done", layerIndex ).ascii() ); + } + + for ( unsigned i = 0; i < numberOfWorkers; ++i ) + { + threads[i].join(); + } + + gurobiEnd = TimeUtils::sampleMicro(); + + LPFormulator_LOG( + Stringf( "Number of tighter bounds found by Gurobi: %u. Sign changes: %u. Cutoffs: %u\n", + tighterBoundCounter.load(), + signChanges.load(), + cutoffs.load() ) + .ascii() ); + LPFormulator_LOG( Stringf( "Seconds spent Gurobiing: %llu\n", + TimeUtils::timePassed( gurobiStart, gurobiEnd ) / 1000000 ) + .ascii() ); + + // Clean up + clearSolverQueue( freeSolvers ); + + if ( threads ) + { + delete[] threads; + threads = NULL; + } + + if ( infeasible ) + throw InfeasibleQueryException(); +} + +void LPFormulator::optimizeBoundsOfOneLayerWithLpRelaxation( const Map &layers, + unsigned targetIndex ) +{ + unsigned numberOfWorkers = Options::get()->getInt( Options::NUM_WORKERS ); + + Map solverToIndex; + // Create a queue of free workers + // When a worker is working, it is popped off the queue, when it is done, it + // is added back to the queue. 
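+    // Worker pattern used below ( see optimizeBoundsOfNeuronsWithLpRelaxation ):
+    //     GurobiWrapper *solver;
+    //     while ( !freeSolvers.pop( solver ) )
+    //         boost::this_thread::sleep_for( waitTime ); // no idle solver yet
+    //     ... encode the relaxation and tighten one neuron's bounds ...
+    //     enqueueSolver( freeSolvers, solver );          // return it to the pool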
+ SolverQueue freeSolvers( numberOfWorkers ); + for ( unsigned i = 0; i < numberOfWorkers; ++i ) + { + GurobiWrapper *gurobi = new GurobiWrapper(); + solverToIndex[gurobi] = i; + enqueueSolver( freeSolvers, gurobi ); + } + + boost::thread *threads = new boost::thread[numberOfWorkers]; + std::mutex mtx; + std::atomic_bool infeasible( false ); + + std::atomic_uint tighterBoundCounter( 0 ); + std::atomic_uint signChanges( 0 ); + std::atomic_uint cutoffs( 0 ); + + struct timespec gurobiStart; + (void)gurobiStart; + struct timespec gurobiEnd; + (void)gurobiEnd; + + gurobiStart = TimeUtils::sampleMicro(); + + Layer *layer = layers[targetIndex]; + + ThreadArgument argument( layer, + &layers, + std::ref( freeSolvers ), + std::ref( mtx ), + std::ref( infeasible ), + std::ref( tighterBoundCounter ), + std::ref( signChanges ), + std::ref( cutoffs ), + layers.size() - 1, + targetIndex, + threads, + &solverToIndex ); + + // optimize every neuron of layer + optimizeBoundsOfNeuronsWithLpRelaxation( argument, false ); + + for ( unsigned i = 0; i < numberOfWorkers; ++i ) + { + threads[i].join(); + } + + gurobiEnd = TimeUtils::sampleMicro(); + + LPFormulator_LOG( + Stringf( "Number of tighter bounds found by Gurobi: %u. Sign changes: %u. Cutoffs: %u\n", + tighterBoundCounter.load(), + signChanges.load(), + cutoffs.load() ) + .ascii() ); + LPFormulator_LOG( Stringf( "Seconds spent Gurobiing: %llu\n", + TimeUtils::timePassed( gurobiStart, gurobiEnd ) / 1000000 ) + .ascii() ); + + clearSolverQueue( freeSolvers ); + + if ( infeasible ) + throw InfeasibleQueryException(); +} + +void LPFormulator::optimizeBoundsOfNeuronsWithLpRelaxation( + ThreadArgument &args, + bool backward, + const Map> &layerIndicesToParameters, + const Vector &polygonalTightenings ) +{ + unsigned numberOfWorkers = Options::get()->getInt( Options::NUM_WORKERS ); + + // Time to wait if no idle worker is availble + boost::chrono::milliseconds waitTime( numberOfWorkers - 1 ); + + Layer *layer = args._layer; + const Map layers = *args._layers; + unsigned targetIndex = args._targetIndex; + unsigned lastIndexOfRelaxation = args._lastIndexOfRelaxation; + + const Map solverToIndex = *args._solverToIndex; + SolverQueue &freeSolvers = args._freeSolvers; + std::mutex &mtx = args._mtx; + std::atomic_bool &infeasible = args._infeasible; + std::atomic_uint &tighterBoundCounter = args._tighterBoundCounter; + std::atomic_uint &signChanges = args._signChanges; + std::atomic_uint &cutoffs = args._cutoffs; + boost::thread *threads = args._threads; + + double currentLb; + double currentUb; + + bool skipTightenLb = false; // If true, skip lower bound tightening + bool skipTightenUb = false; // If true, skip upper bound tightening + + // declare simulations as local var to avoid a problem which can happen due to multi thread + // process. + const Vector> *simulations = + _layerOwner->getLayer( targetIndex )->getSimulations(); + + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( layer->neuronEliminated( i ) ) + continue; + + currentLb = layer->getLb( i ); + currentUb = layer->getUb( i ); + + if ( _cutoffInUse && ( currentLb >= _cutoffValue || currentUb <= _cutoffValue ) ) + continue; + + skipTightenLb = false; + skipTightenUb = false; + + // Loop for simulation + for ( const auto &simValue : ( *simulations ).get( i ) ) + { + if ( _cutoffInUse && _cutoffValue < simValue ) // If x_lower < 0 < x_sim, do not try to + // call tightning upper bound. 
+ skipTightenUb = true; + + if ( _cutoffInUse && simValue < _cutoffValue ) // If x_sim < 0 < x_upper, do not try to + // call tightning lower bound. + skipTightenLb = true; + + if ( skipTightenUb && skipTightenLb ) + break; + } + + // If no tightning is needed, continue + if ( skipTightenUb && skipTightenLb ) + { + LPFormulator_LOG( + Stringf( + "Skip tightening lower and upper bounds for layer %d index %u", targetIndex, i ) + .ascii() ); + continue; + } + else if ( skipTightenUb ) + { + LPFormulator_LOG( + Stringf( "Skip tightening upper bound for layer %u index %u", targetIndex, i ) + .ascii() ); + } + else if ( skipTightenLb ) + { + LPFormulator_LOG( + Stringf( "Skip tightening lower bound for layer %u index %u", targetIndex, i ) + .ascii() ); + } + + if ( infeasible ) + { + // infeasibility is derived, interupt all active threads + for ( unsigned i = 0; i < numberOfWorkers; ++i ) + { + threads[i].interrupt(); + threads[i].join(); + } + + // Clean up + clearSolverQueue( freeSolvers ); + + if ( threads ) + { + delete[] threads; + threads = NULL; + } + + throw InfeasibleQueryException(); + } + + // Wait until there is an idle solver + GurobiWrapper *freeSolver; + while ( !freeSolvers.pop( freeSolver ) ) + boost::this_thread::sleep_for( waitTime ); + + freeSolver->resetModel(); + + mtx.lock(); + if ( backward ) + createLPRelaxationAfter( layers, + *freeSolver, + lastIndexOfRelaxation, + layerIndicesToParameters, + polygonalTightenings ); + else + createLPRelaxation( layers, + *freeSolver, + lastIndexOfRelaxation, + layerIndicesToParameters, + polygonalTightenings ); + mtx.unlock(); + + // spawn a thread to tighten the bounds for the current variable + ThreadArgument argument( freeSolver, + layer, + i, + currentLb, + currentUb, + _cutoffInUse, + _cutoffValue, + _layerOwner, + std::ref( freeSolvers ), + std::ref( mtx ), + std::ref( infeasible ), + std::ref( tighterBoundCounter ), + std::ref( signChanges ), + std::ref( cutoffs ), + skipTightenLb, + skipTightenUb ); + + if ( numberOfWorkers == 1 ) + tightenSingleVariableBoundsWithLPRelaxation( argument ); + else + threads[solverToIndex[freeSolver]] = + boost::thread( tightenSingleVariableBoundsWithLPRelaxation, argument ); + } +} + +void LPFormulator::tightenSingleVariableBoundsWithLPRelaxation( ThreadArgument &argument ) +{ + try + { + GurobiWrapper *gurobi = argument._gurobi; + Layer *layer = argument._layer; + unsigned index = argument._index; + double currentLb = argument._currentLb; + double currentUb = argument._currentUb; + bool cutoffInUse = argument._cutoffInUse; + double cutoffValue = argument._cutoffValue; + LayerOwner *layerOwner = argument._layerOwner; + SolverQueue &freeSolvers = argument._freeSolvers; + std::mutex &mtx = argument._mtx; + std::atomic_bool &infeasible = argument._infeasible; + std::atomic_uint &tighterBoundCounter = argument._tighterBoundCounter; + std::atomic_uint &signChanges = argument._signChanges; + std::atomic_uint &cutoffs = argument._cutoffs; + bool skipTightenLb = argument._skipTightenLb; + bool skipTightenUb = argument._skipTightenUb; + + LPFormulator_LOG( + Stringf( "Tightening bounds for layer %u index %u", layer->getLayerIndex(), index ) + .ascii() ); + + unsigned variable = layer->neuronToVariable( index ); + Stringf variableName( "x%u", variable ); + + if ( !skipTightenUb ) + { + LPFormulator_LOG( Stringf( "Computing upperbound..." 
).ascii() ); + double ub = optimizeWithGurobi( + *gurobi, MinOrMax::MAX, variableName, cutoffValue, &infeasible ) + + GlobalConfiguration::LP_TIGHTENING_ROUNDING_CONSTANT; + ; + LPFormulator_LOG( Stringf( "Upperbound computed %f", ub ).ascii() ); + + // Store the new bound if it is tighter + if ( ub < currentUb ) + { + if ( FloatUtils::isPositive( currentUb ) && !FloatUtils::isPositive( ub ) ) + ++signChanges; + + mtx.lock(); + layer->setUb( index, ub ); + layerOwner->receiveTighterBound( Tightening( variable, ub, Tightening::UB ) ); + mtx.unlock(); + + ++tighterBoundCounter; + + if ( cutoffInUse && ub < cutoffValue ) + { + ++cutoffs; + enqueueSolver( freeSolvers, gurobi ); + return; + } + } + } + + if ( !skipTightenLb ) + { + LPFormulator_LOG( Stringf( "Computing lowerbound..." ).ascii() ); + gurobi->reset(); + double lb = optimizeWithGurobi( + *gurobi, MinOrMax::MIN, variableName, cutoffValue, &infeasible ) - + GlobalConfiguration::LP_TIGHTENING_ROUNDING_CONSTANT; + LPFormulator_LOG( Stringf( "Lowerbound computed: %f", lb ).ascii() ); + // Store the new bound if it is tighter + if ( lb > currentLb ) + { + if ( FloatUtils::isNegative( currentLb ) && !FloatUtils::isNegative( lb ) ) + ++signChanges; + + mtx.lock(); + layer->setLb( index, lb ); + layerOwner->receiveTighterBound( Tightening( variable, lb, Tightening::LB ) ); + mtx.unlock(); + ++tighterBoundCounter; + + if ( cutoffInUse && lb > cutoffValue ) + ++cutoffs; + } + } + enqueueSolver( freeSolvers, gurobi ); + } + catch ( boost::thread_interrupted & ) + { + enqueueSolver( argument._freeSolvers, argument._gurobi ); + } +} + +void LPFormulator::createLPRelaxation( + const Map &layers, + GurobiWrapper &gurobi, + unsigned lastLayer, + const Map> &layerIndicesToParameters, + const Vector &polygonalTightenings ) +{ + for ( const auto &layer : layers ) + { + unsigned currentLayerIndex = layer.second->getLayerIndex(); + if ( currentLayerIndex > lastLayer ) + continue; + + if ( layerIndicesToParameters.empty() ) + addLayerToModel( gurobi, layer.second, false ); + else + { + const Vector ¤tLayerCoeffs = layerIndicesToParameters[currentLayerIndex]; + addLayerToParameterisedModel( gurobi, layer.second, false, currentLayerCoeffs ); + } + } + addPolyognalTighteningsToLpRelaxation( gurobi, layers, 0, lastLayer, polygonalTightenings ); +} + +void LPFormulator::createLPRelaxationAfter( + const Map &layers, + GurobiWrapper &gurobi, + unsigned firstLayer, + const Map> &layerIndicesToParameters, + const Vector &polygonalTightenings ) +{ + unsigned depth = GlobalConfiguration::BACKWARD_BOUND_PROPAGATION_DEPTH; + std::priority_queue, std::greater> layersToAdd; + Map layerToDepth; + + layersToAdd.push( firstLayer ); + layerToDepth[firstLayer] = 0; + while ( !layersToAdd.empty() ) + { + unsigned currentLayerIndex = layersToAdd.top(); + Layer *currentLayer = layers[currentLayerIndex]; + unsigned currentDepth = layerToDepth[currentLayerIndex]; + layersToAdd.pop(); + if ( currentDepth > depth ) + continue; + else + { + if ( layerIndicesToParameters.empty() ) + addLayerToModel( gurobi, currentLayer, true ); + else + { + const Vector ¤tLayerCoeffs = + layerIndicesToParameters[currentLayerIndex]; + addLayerToParameterisedModel( gurobi, currentLayer, true, currentLayerCoeffs ); + } + + for ( const auto &nextLayer : currentLayer->getSuccessorLayers() ) + { + if ( layerToDepth.exists( nextLayer ) ) + continue; + layersToAdd.push( nextLayer ); + layerToDepth[nextLayer] = currentDepth + 1; + } + } + } + addPolyognalTighteningsToLpRelaxation( + gurobi, layers, 
firstLayer, layersToAdd.top(), polygonalTightenings ); +} + +void LPFormulator::addLayerToModel( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables ) +{ + switch ( layer->getLayerType() ) + { + case Layer::INPUT: + addInputLayerToLpRelaxation( gurobi, layer ); + break; + + case Layer::RELU: + addReluLayerToLpRelaxation( gurobi, layer, createVariables ); + break; + + case Layer::WEIGHTED_SUM: + addWeightedSumLayerToLpRelaxation( gurobi, layer, createVariables ); + break; + + case Layer::ROUND: + addRoundLayerToLpRelaxation( gurobi, layer, createVariables ); + break; + + case Layer::LEAKY_RELU: + addLeakyReluLayerToLpRelaxation( gurobi, layer, createVariables ); + break; + + case Layer::ABSOLUTE_VALUE: + addAbsoluteValueLayerToLpRelaxation( gurobi, layer, createVariables ); + break; + + case Layer::SIGN: + addSignLayerToLpRelaxation( gurobi, layer, createVariables ); + break; + + case Layer::MAX: + addMaxLayerToLpRelaxation( gurobi, layer, createVariables ); + break; + + case Layer::SIGMOID: + addSigmoidLayerToLpRelaxation( gurobi, layer, createVariables ); + break; + + case Layer::SOFTMAX: + addSoftmaxLayerToLpRelaxation( gurobi, layer, createVariables ); + break; + + case Layer::BILINEAR: + addBilinearLayerToLpRelaxation( gurobi, layer, createVariables ); + break; + + default: + throw NLRError( NLRError::LAYER_TYPE_NOT_SUPPORTED, "LPFormulator" ); + break; + } +} + +void LPFormulator::addInputLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + unsigned variable = layer->neuronToVariable( i ); + gurobi.addVariable( Stringf( "x%u", variable ), layer->getLb( i ), layer->getUb( i ) ); + } +} + +void LPFormulator::addReluLayerToLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( !layer->neuronEliminated( i ) ) + { + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + unsigned sourceNeuron = sources.begin()->_neuron; + + if ( sourceLayer->neuronEliminated( sourceNeuron ) ) + { + // If the source neuron has been eliminated, this neuron is constant + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + double targetValue = sourceValue > 0 ? 
sourceValue : 0; + + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + + continue; + } + + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + gurobi.addVariable( Stringf( "x%u", targetVariable ), 0, layer->getUb( i ) ); + + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // The ReLU is active, y = x + if ( sourceLb < 0 ) + sourceLb = 0; + + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addEqConstraint( terms, 0 ); + } + else if ( !FloatUtils::isPositive( sourceUb ) ) + { + // The ReLU is inactive, y = 0 + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addEqConstraint( terms, 0 ); + } + else + { + /* + The phase of this ReLU is not yet fixed. + + For y = ReLU(x), we add the following triangular relaxation: + + 1. y >= 0 + 2. y >= x + 3. y is below the line the crosses (x.lb,0) and (x.ub,x.ub) + */ + + // y >= 0 + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + + // y >= x, i.e. y - x >= 0 + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + + /* + u ul + y <= ----- x - ----- + u - l u - l + */ + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -sourceUb / ( sourceUb - sourceLb ), + Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, + ( -sourceUb * sourceLb ) / ( sourceUb - sourceLb ) ); + } + } + } +} + +void LPFormulator::addRoundLayerToLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( !layer->neuronEliminated( i ) ) + { + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + unsigned sourceNeuron = sources.begin()->_neuron; + + if ( sourceLayer->neuronEliminated( sourceNeuron ) ) + { + // If the source neuron has been eliminated, this neuron is constant + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + double targetValue = FloatUtils::round( sourceValue ); + + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + + continue; + } + + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + double ub = std::min( FloatUtils::round( sourceUb ), layer->getUb( i ) ); + double lb = std::max( FloatUtils::round( sourceLb ), layer->getLb( i ) ); + + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); + + // If u = l: y = 
round(u) + if ( FloatUtils::areEqual( sourceUb, sourceLb ) ) + { + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addEqConstraint( terms, ub ); + } + + else + { + List terms; + // y <= x + 0.5, i.e. y - x <= 0.5 + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, 0.5 ); + + // y >= x - 0.5, i.e. y - x >= -0.5 + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, -0.5 ); + } + } + } +} + +void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( !layer->neuronEliminated( i ) ) + { + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + unsigned sourceNeuron = sources.begin()->_neuron; + + if ( sourceLayer->neuronEliminated( sourceNeuron ) ) + { + // If the source neuron has been eliminated, this neuron is constant + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + double targetValue = sourceValue > 0 ? sourceValue : -sourceValue; + + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + + continue; + } + + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // The AbsoluteValue is active, y = x + if ( sourceLb < 0 ) + sourceLb = 0; + + double ub = std::min( sourceUb, layer->getUb( i ) ); + double lb = std::max( sourceLb, layer->getLb( i ) ); + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); + + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addEqConstraint( terms, 0 ); + } + else if ( !FloatUtils::isPositive( sourceUb ) ) + { + double ub = std::min( -sourceLb, layer->getUb( i ) ); + double lb = std::max( -sourceUb, layer->getLb( i ) ); + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); + + // The AbsoluteValue is inactive, y = -x, i.e. y + x = 0 + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addEqConstraint( terms, 0 ); + } + else + { + double ub = std::min( std::max( -sourceLb, sourceUb ), layer->getUb( i ) ); + double lb = std::max( 0.0, layer->getLb( i ) ); + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); + + // The phase of this AbsoluteValue is not yet fixed, 0 <= y <= max(-lb, ub). 
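+                // For example, if x ranges over [ -2, 3 ] ( and the stored bounds
+                // for y are no tighter ), this yields 0 <= y <= 3. In this unfixed
+                // case only interval bounds are added; no constraint relates y to x.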
+ // y >= 0 + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + + // y <= max(-lb, ub) + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addLeqConstraint( terms, ub ); + } + } + } +} + +void LPFormulator::addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( !layer->neuronEliminated( i ) ) + { + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + unsigned sourceNeuron = sources.begin()->_neuron; + + if ( sourceLayer->neuronEliminated( sourceNeuron ) ) + { + // If the source neuron has been eliminated, this neuron is constant + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + double targetValue = SigmoidConstraint::sigmoid( sourceValue ); + + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + + continue; + } + + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + double sourceUbSigmoid = SigmoidConstraint::sigmoid( sourceUb ); + double sourceLbSigmoid = SigmoidConstraint::sigmoid( sourceLb ); + + double ub = std::min( sourceUbSigmoid, layer->getUb( i ) ); + double lb = std::max( sourceLbSigmoid, layer->getLb( i ) ); + + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); + + // If u = l: y = sigmoid(u) + if ( FloatUtils::areEqual( sourceUb, sourceLb ) ) + { + List terms; + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addEqConstraint( terms, ub ); + } + + else + { + List terms; + double lambda = ( ub - lb ) / ( sourceUb - sourceLb ); + double lambdaPrime = std::min( SigmoidConstraint::sigmoidDerivative( sourceLb ), + SigmoidConstraint::sigmoidDerivative( sourceUb ) ); + + // update lower bound + if ( FloatUtils::isPositive( sourceLb ) ) + { + // y >= lambda * (x - l) + sigmoid(lb), i.e. y - lambda * x >= sigmoid(lb) - + // lambda * l + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( + GurobiWrapper::Term( -lambda, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, sourceLbSigmoid - sourceLb * lambda ); + } + + else + { + // y >= lambda' * (x - l) + sigmoid(lb), i.e. y - lambda' * x >= sigmoid(lb) - + // lambda' * l + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( + GurobiWrapper::Term( -lambdaPrime, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, sourceLbSigmoid - sourceLb * lambdaPrime ); + } + + // update upper bound + if ( !FloatUtils::isPositive( sourceUb ) ) + { + // y <= lambda * (x - u) + sigmoid(ub), i.e. 
y - lambda * x <= sigmoid(ub) - + // lambda * u + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( + GurobiWrapper::Term( -lambda, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, sourceUbSigmoid - sourceUb * lambda ); + } + else + { + // y <= lambda' * (x - u) + sigmoid(ub), i.e. y - lambda' * x <= sigmoid(ub) - + // lambda' * u + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( + GurobiWrapper::Term( -lambdaPrime, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, sourceUbSigmoid - sourceUb * lambdaPrime ); + } + } + } + } +} + +void LPFormulator::addSignLayerToLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( layer->neuronEliminated( i ) ) + continue; + + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + unsigned sourceNeuron = sources.begin()->_neuron; + + if ( sourceLayer->neuronEliminated( sourceNeuron ) ) + { + // If the source neuron has been eliminated, this neuron is constant + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + double targetValue = FloatUtils::isNegative( sourceValue ) ? -1 : 1; + + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + + continue; + } + + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // The Sign is positive, y = 1 + gurobi.addVariable( Stringf( "x%u", targetVariable ), 1, 1 ); + } + else if ( FloatUtils::isNegative( sourceUb ) ) + { + // The Sign is negative, y = -1 + gurobi.addVariable( Stringf( "x%u", targetVariable ), -1, -1 ); + } + else + { + /* + The phase of this Sign is not yet fixed. + + For y = Sign(x), we add the following parallelogram relaxation: + + 1. y >= -1 + 2. y <= -1 + 3. y is below the line the crosses (x.lb,-1) and (0,1) + 4. 
y is above the line the crosses (0,-1) and (x.ub,1) + */ + + // -1 <= y <= 1 + gurobi.addVariable( Stringf( "x%u", targetVariable ), -1, 1 ); + + /* + 2 + y <= ----- x + 1 + - l + */ + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( 2.0 / sourceLb, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, 1 ); + + /* + 2 + y >= ----- x - 1 + u + */ + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( + GurobiWrapper::Term( -2.0 / sourceUb, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, -1 ); + } + } +} + + +void LPFormulator::addMaxLayerToLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( layer->neuronEliminated( i ) ) + continue; + + unsigned targetVariable = layer->neuronToVariable( i ); + gurobi.addVariable( + Stringf( "x%u", targetVariable ), layer->getLb( i ), layer->getUb( i ) ); + + List sources = layer->getActivationSources( i ); + + bool haveFixedSourceValue = false; + double maxFixedSourceValue = FloatUtils::negativeInfinity(); + + double maxConcreteUb = FloatUtils::negativeInfinity(); + + List terms; + + for ( const auto &source : sources ) + { + const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); + unsigned sourceNeuron = source._neuron; + + if ( sourceLayer->neuronEliminated( sourceNeuron ) ) + { + haveFixedSourceValue = true; + double value = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + if ( value > maxFixedSourceValue ) + maxFixedSourceValue = value; + continue; + } + + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + // Target is at least source: target - source >= 0 + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + + // Find maximal concrete upper bound + if ( sourceUb > maxConcreteUb ) + maxConcreteUb = sourceUb; + } + + if ( haveFixedSourceValue && ( maxConcreteUb < maxFixedSourceValue ) ) + { + // At least one of the sources has a fixed value, + // and this fixed value dominates other sources. 
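+            // For example, if an eliminated source is fixed at 5 and every live
+            // source has an upper bound below 5, the max is the constant 5 and
+            // the target variable is pinned to it.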
+ terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addEqConstraint( terms, maxFixedSourceValue ); + } + else + { + // If we have a fixed value, it's a lower bound + if ( haveFixedSourceValue ) + { + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addGeqConstraint( terms, maxFixedSourceValue ); + } + + // Target must be smaller than greatest concrete upper bound + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addLeqConstraint( terms, maxConcreteUb ); + } + } +} + +void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( layer->neuronEliminated( i ) ) + continue; + + Set handledInputNeurons; + List sources = layer->getActivationSources( i ); + + Vector sourceLbs; + Vector sourceUbs; + Vector sourceMids; + Vector targetLbs; + Vector targetUbs; + for ( const auto &source : sources ) + { + const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); + unsigned sourceNeuron = source._neuron; + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + sourceMids.append( ( sourceLb + sourceUb ) / 2 ); + targetLbs.append( layer->getLb( i ) ); + targetUbs.append( layer->getUb( i ) ); + } + + // Find the index of i in the softmax + unsigned index = 0; + for ( const auto &source : sources ) + { + if ( handledInputNeurons.exists( source._neuron ) ) + ++index; + else + { + handledInputNeurons.insert( source._neuron ); + break; + } + } + + double ub = + std::min( Layer::linearUpperBound( sourceLbs, sourceUbs, index ), layer->getUb( i ) ); + double lb = + std::max( Layer::linearLowerBound( sourceLbs, sourceUbs, index ), layer->getLb( i ) ); + targetLbs[index] = lb; + targetUbs[index] = ub; + + unsigned targetVariable = layer->neuronToVariable( i ); + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); + + double bias; + SoftmaxBoundType boundType = Options::get()->getSoftmaxBoundType(); + List terms; + if ( FloatUtils::areEqual( lb, ub ) ) + { + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addEqConstraint( terms, ub ); + } + else + { + // Compute symbolic bound + if ( boundType == SoftmaxBoundType::LOG_SUM_EXP_DECOMPOSITION ) + { + bool useLSE2 = false; + for ( const auto &lb : targetLbs ) + { + if ( lb > GlobalConfiguration::SOFTMAX_LSE2_THRESHOLD ) + useLSE2 = true; + } + unsigned inputIndex = 0; + if ( !useLSE2 ) + { + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + bias = Layer::LSELowerBound( sourceMids, sourceLbs, sourceUbs, index ); + for ( const auto &source : sources ) + { + const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); + unsigned sourceNeuron = source._neuron; + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double dldj = Layer::dLSELowerBound( + sourceMids, 
sourceLbs, sourceUbs, index, inputIndex ); + terms.append( + GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); + bias -= dldj * sourceMids[inputIndex]; + ++inputIndex; + } + gurobi.addGeqConstraint( terms, bias ); + } + else + { + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + bias = Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, index ); + for ( const auto &source : sources ) + { + const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); + unsigned sourceNeuron = source._neuron; + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double dldj = Layer::dLSELowerBound2( + sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + terms.append( + GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); + bias -= dldj * sourceMids[inputIndex]; + ++inputIndex; + } + gurobi.addGeqConstraint( terms, bias ); + } + + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + bias = Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, index ); + inputIndex = 0; + for ( const auto &source : sources ) + { + const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); + unsigned sourceNeuron = source._neuron; + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double dudj = Layer::dLSEUpperbound( + sourceMids, targetLbs, targetUbs, index, inputIndex ); + terms.append( GurobiWrapper::Term( -dudj, Stringf( "x%u", sourceVariable ) ) ); + bias -= dudj * sourceMids[inputIndex]; + ++inputIndex; + } + gurobi.addLeqConstraint( terms, bias ); + } + else if ( boundType == SoftmaxBoundType::EXPONENTIAL_RECIPROCAL_DECOMPOSITION ) + { + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + bias = Layer::ERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); + unsigned inputIndex = 0; + for ( const auto &source : sources ) + { + const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); + unsigned sourceNeuron = source._neuron; + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double dldj = + Layer::dERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); + bias -= dldj * sourceMids[inputIndex]; + ++inputIndex; + } + gurobi.addGeqConstraint( terms, bias ); + + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + bias = Layer::ERUpperBound( sourceMids, targetLbs, targetUbs, index ); + inputIndex = 0; + for ( const auto &source : sources ) + { + const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); + unsigned sourceNeuron = source._neuron; + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double dudj = + Layer::dERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex ); + terms.append( GurobiWrapper::Term( -dudj, Stringf( "x%u", sourceVariable ) ) ); + bias -= dudj * sourceMids[inputIndex]; + ++inputIndex; + } + gurobi.addLeqConstraint( terms, bias ); + } + } + } +} + +void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( !layer->neuronEliminated( i ) ) + { + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); 
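+            // The two constraints added below are McCormick inequalities for
+            // z = x * y over a box:
+            //     z >= l_y * x + l_x * y - l_x * l_y
+            //     z <= u_y * x + l_x * y - l_x * u_y
+            // For example, with x, y in [ 0, 1 ] they reduce to z >= 0 and z <= x.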
+ + Vector sourceLbs; + Vector sourceUbs; + Vector sourceValues; + Vector sourceNeurons; + bool allConstant = true; + for ( const auto &sourceIndex : sources ) + { + unsigned sourceNeuron = sourceIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeuron ) ); + + sourceNeurons.append( sourceNeuron ); + sourceLbs.append( sourceLb ); + sourceUbs.append( sourceUb ); + + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) + { + allConstant = false; + } + else + { + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + sourceValues.append( sourceValue ); + } + } + + if ( allConstant ) + { + // If the both source neurons have been eliminated, this neuron is constant + double targetValue = sourceValues[0] * sourceValues[1]; + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + continue; + } + + double lb = FloatUtils::infinity(); + double ub = FloatUtils::negativeInfinity(); + List values = { sourceLbs[0] * sourceLbs[1], + sourceLbs[0] * sourceUbs[1], + sourceUbs[0] * sourceLbs[1], + sourceUbs[0] * sourceUbs[1] }; + for ( const auto &v : values ) + { + if ( v < lb ) + lb = v; + if ( v > ub ) + ub = v; + } + + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); + + // Lower bound: out >= l_y * x + l_x * y - l_x * l_y + List terms; + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( + -sourceLbs[1], + Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); + terms.append( GurobiWrapper::Term( + -sourceLbs[0], + Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); + gurobi.addGeqConstraint( terms, -sourceLbs[0] * sourceLbs[1] ); + + // Upper bound: out <= u_y * x + l_x * y - l_x * u_y + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( + -sourceUbs[1], + Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); + terms.append( GurobiWrapper::Term( + -sourceLbs[0], + Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); + gurobi.addLeqConstraint( terms, -sourceLbs[0] * sourceUbs[1] ); + } + } +} + +void LPFormulator::addWeightedSumLayerToLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables ) +{ + if ( createVariables ) + { + for ( const auto &sourceLayerPair : layer->getSourceLayers() ) + { + const Layer *sourceLayer = _layerOwner->getLayer( sourceLayerPair.first ); + unsigned sourceLayerSize = sourceLayerPair.second; + + for ( unsigned j = 0; j < sourceLayerSize; ++j ) + { + if ( !sourceLayer->neuronEliminated( j ) ) + { + Stringf sourceVariableName( "x%u", sourceLayer->neuronToVariable( j ) ); + if ( !gurobi.containsVariable( sourceVariableName ) ) + { + gurobi.addVariable( + sourceVariableName, sourceLayer->getLb( j ), sourceLayer->getUb( j ) ); + } + } + } + } + } + + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( !layer->neuronEliminated( i ) ) + { + unsigned variable = layer->neuronToVariable( i ); + + gurobi.addVariable( Stringf( "x%u", variable ), layer->getLb( i ), layer->getUb( i ) ); + + List terms; + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", variable ) ) ); + + double 
+
+            for ( const auto &sourceLayerPair : layer->getSourceLayers() )
+            {
+                const Layer *sourceLayer = _layerOwner->getLayer( sourceLayerPair.first );
+                unsigned sourceLayerSize = sourceLayerPair.second;
+
+                for ( unsigned j = 0; j < sourceLayerSize; ++j )
+                {
+                    double weight = layer->getWeight( sourceLayerPair.first, j, i );
+                    if ( !sourceLayer->neuronEliminated( j ) )
+                    {
+                        Stringf sourceVariableName( "x%u", sourceLayer->neuronToVariable( j ) );
+                        terms.append( GurobiWrapper::Term( weight, sourceVariableName ) );
+                    }
+                    else
+                    {
+                        bias -= weight * sourceLayer->getEliminatedNeuronValue( j );
+                    }
+                }
+            }
+
+            gurobi.addEqConstraint( terms, bias );
+        }
+    }
+}
+
+void LPFormulator::addLeakyReluLayerToLpRelaxation( GurobiWrapper &gurobi,
+                                                    const Layer *layer,
+                                                    bool createVariables )
+{
+    double slope = layer->getAlpha();
+    for ( unsigned i = 0; i < layer->getSize(); ++i )
+    {
+        if ( !layer->neuronEliminated( i ) )
+        {
+            unsigned targetVariable = layer->neuronToVariable( i );
+
+            List<NeuronIndex> sources = layer->getActivationSources( i );
+            const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer );
+            unsigned sourceNeuron = sources.begin()->_neuron;
+
+            if ( sourceLayer->neuronEliminated( sourceNeuron ) )
+            {
+                // If the source neuron has been eliminated, this neuron is constant.
+                // For LeakyReLU a negative constant maps to slope * value, not 0.
+                double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron );
+                double targetValue = sourceValue > 0 ? sourceValue : slope * sourceValue;
+
+                gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue );
+
+                continue;
+            }
+
+            unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron );
+            double sourceLb = sourceLayer->getLb( sourceNeuron );
+            double sourceUb = sourceLayer->getUb( sourceNeuron );
+
+            String sourceName = Stringf( "x%u", sourceVariable );
+            if ( createVariables && !gurobi.containsVariable( sourceName ) )
+                gurobi.addVariable( sourceName, sourceLb, sourceUb );
+
+            gurobi.addVariable(
+                Stringf( "x%u", targetVariable ), layer->getLb( i ), layer->getUb( i ) );
+
+            if ( !FloatUtils::isNegative( sourceLb ) )
+            {
+                // The LeakyReLU is active, y = x
+
+                List<GurobiWrapper::Term> terms;
+                terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) );
+                gurobi.addEqConstraint( terms, 0 );
+            }
+            else if ( !FloatUtils::isPositive( sourceUb ) )
+            {
+                // The LeakyReLU is inactive, y = alpha * x
+                List<GurobiWrapper::Term> terms;
+                terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                terms.append( GurobiWrapper::Term( -slope, Stringf( "x%u", sourceVariable ) ) );
+                gurobi.addEqConstraint( terms, 0 );
+            }
+            else
+            {
+                double width = sourceUb - sourceLb;
+                double weight = ( sourceUb - slope * sourceLb ) / width;
+                double bias = ( ( slope - 1 ) * sourceUb * sourceLb ) / width;
+
+                /*
+                  The phase of this LeakyReLU is not yet fixed.
+                  For y = LeakyReLU(x), we add the following triangular relaxation:
+                  1. y >= alpha * x
+                  2. y >= x
+                  3. y is below the line that crosses (x.lb, alpha * x.lb) and (x.ub, x.ub)
+                */
+
+                // y >= alpha * x
+                List<GurobiWrapper::Term> terms;
+                terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                terms.append( GurobiWrapper::Term( -slope, Stringf( "x%u", sourceVariable ) ) );
+                gurobi.addGeqConstraint( terms, 0 );
+
+                // y >= x, i.e. y - x >= 0
+                terms.clear();
+                terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) );
+                gurobi.addGeqConstraint( terms, 0 );
+
+                terms.clear();
+                terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                terms.append( GurobiWrapper::Term( -weight, Stringf( "x%u", sourceVariable ) ) );
+                gurobi.addLeqConstraint( terms, bias );
+            }
+        }
+    }
+}
+
+void LPFormulator::addLayerToParameterisedModel( GurobiWrapper &gurobi,
+                                                 const Layer *layer,
+                                                 bool createVariables,
+                                                 const Vector<double> &coeffs )
+{
+    switch ( layer->getLayerType() )
+    {
+    case Layer::RELU:
+        addReluLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeffs );
+        break;
+
+    case Layer::LEAKY_RELU:
+        addLeakyReluLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeffs );
+        break;
+
+    case Layer::SIGN:
+        addSignLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeffs );
+        break;
+
+    case Layer::BILINEAR:
+        addBilinearLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeffs );
+        break;
+
+    default:
+        addLayerToModel( gurobi, layer, createVariables );
+        break;
+    }
+}
+
+void LPFormulator::addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi,
+                                                            const Layer *layer,
+                                                            bool createVariables,
+                                                            const Vector<double> &coeffs )
+{
+    double coeff = coeffs[0];
+    for ( unsigned i = 0; i < layer->getSize(); ++i )
+    {
+        if ( !layer->neuronEliminated( i ) )
+        {
+            unsigned targetVariable = layer->neuronToVariable( i );
+
+            List<NeuronIndex> sources = layer->getActivationSources( i );
+            const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer );
+            unsigned sourceNeuron = sources.begin()->_neuron;
+
+            if ( sourceLayer->neuronEliminated( sourceNeuron ) )
+            {
+                // If the source neuron has been eliminated, this neuron is constant
+                double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron );
+                double targetValue = sourceValue > 0 ? sourceValue : 0;
+
+                gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue );
+
+                continue;
+            }
+
+            unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron );
+            double sourceLb = sourceLayer->getLb( sourceNeuron );
+            double sourceUb = sourceLayer->getUb( sourceNeuron );
+            String sourceName = Stringf( "x%u", sourceVariable );
+            if ( createVariables && !gurobi.containsVariable( sourceName ) )
+                gurobi.addVariable( sourceName, sourceLb, sourceUb );
+
+            gurobi.addVariable( Stringf( "x%u", targetVariable ), 0, layer->getUb( i ) );
+
+            if ( !FloatUtils::isNegative( sourceLb ) )
+            {
+                // The ReLU is active, y = x
+                if ( sourceLb < 0 )
+                    sourceLb = 0;
+
+                List<GurobiWrapper::Term> terms;
+                terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) );
+                gurobi.addEqConstraint( terms, 0 );
+            }
+            else if ( !FloatUtils::isPositive( sourceUb ) )
+            {
+                // The ReLU is inactive, y = 0
+                List<GurobiWrapper::Term> terms;
+                terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                gurobi.addEqConstraint( terms, 0 );
+            }
+            else
+            {
+                /*
+                  The phase of this ReLU is not yet fixed.
+
+                  For y = ReLU(x), we add the following relaxation:
+
+                  1. y >= 0
+                  2. y >= x
+                  3. y >= coeff * x
+                  4. y is below the line that crosses (x.lb,0) and (x.ub,x.ub)
+                */
+
+                // y >= 0
+                List<GurobiWrapper::Term> terms;
+                terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                gurobi.addGeqConstraint( terms, 0 );
+
+                // y >= x, i.e. y - x >= 0
+                terms.clear();
+                terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) );
+                gurobi.addGeqConstraint( terms, 0 );
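+
+                // Note: assuming coeff lies in [0, 1], the next cut is sound, since
+                // ReLU(x) = max(x, 0) >= coeff * x for any such coeff.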
+                // y >= coeff * x, i.e. y - coeff * x >= 0 (varies continuously between
+                // y >= 0 and y >= x).
+                terms.clear();
+                terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                terms.append( GurobiWrapper::Term( -coeff, Stringf( "x%u", sourceVariable ) ) );
+                gurobi.addGeqConstraint( terms, 0 );
+
+                /*
+                         u           ul
+                  y <= ----- x  -  -----
+                       u - l       u - l
+                */
+                terms.clear();
+                terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                terms.append( GurobiWrapper::Term( -sourceUb / ( sourceUb - sourceLb ),
+                                                   Stringf( "x%u", sourceVariable ) ) );
+                gurobi.addLeqConstraint( terms,
+                                         ( -sourceUb * sourceLb ) / ( sourceUb - sourceLb ) );
+            }
+        }
+    }
+}
+
+void LPFormulator::addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi,
+                                                            const Layer *layer,
+                                                            bool createVariables,
+                                                            const Vector<double> &coeffs )
+{
+    for ( unsigned i = 0; i < layer->getSize(); ++i )
+    {
+        if ( layer->neuronEliminated( i ) )
+            continue;
+
+        unsigned targetVariable = layer->neuronToVariable( i );
+
+        List<NeuronIndex> sources = layer->getActivationSources( i );
+        const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer );
+        unsigned sourceNeuron = sources.begin()->_neuron;
+
+        if ( sourceLayer->neuronEliminated( sourceNeuron ) )
+        {
+            // If the source neuron has been eliminated, this neuron is constant
+            double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron );
+            double targetValue = FloatUtils::isNegative( sourceValue ) ? -1 : 1;
+
+            gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue );
+
+            continue;
+        }
+
+        unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron );
+        double sourceLb = sourceLayer->getLb( sourceNeuron );
+        double sourceUb = sourceLayer->getUb( sourceNeuron );
+        String sourceName = Stringf( "x%u", sourceVariable );
+        if ( createVariables && !gurobi.containsVariable( sourceName ) )
+            gurobi.addVariable( sourceName, sourceLb, sourceUb );
+
+        if ( !FloatUtils::isNegative( sourceLb ) )
+        {
+            // The Sign is positive, y = 1
+            gurobi.addVariable( Stringf( "x%u", targetVariable ), 1, 1 );
+        }
+        else if ( FloatUtils::isNegative( sourceUb ) )
+        {
+            // The Sign is negative, y = -1
+            gurobi.addVariable( Stringf( "x%u", targetVariable ), -1, -1 );
+        }
+        else
+        {
+            /*
+              The phase of this Sign is not yet fixed.
+
+              For y = Sign(x), we add the following parallelogram relaxation:
+
+              1. y >= -1
+              2. y <= 1
+              3. y is below the line that crosses (x.lb,-1) and (0,1)
+              4. y is above the line that crosses (0,-1) and (x.ub,1)
+            */
+
+            // -1 <= y <= 1
+            gurobi.addVariable( Stringf( "x%u", targetVariable ), -1, 1 );
+
+            /*
+                      2
+              y <= ------ * coeffs[0] * x + 1
+                     -l
+
+              Varies continuously between y <= 1 and y <= -2/l * x + 1.
+            */
+            List<GurobiWrapper::Term> terms;
+            terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+            terms.append( GurobiWrapper::Term( 2.0 / sourceLb * coeffs[0],
+                                               Stringf( "x%u", sourceVariable ) ) );
+            gurobi.addLeqConstraint( terms, 1 );
+
+            /*
+                      2
+              y >= ------ * coeffs[1] * x - 1
+                      u
+
+              Varies continuously between y >= -1 and y >= 2/u * x - 1.
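+
+              For example (hypothetical values), with l = -2, u = 4 and
+              coeffs = (1, 1), the two cuts are y <= x + 1 and y >= x/2 - 1,
+              passing through (-2,-1),(0,1) and (0,-1),(4,1) respectively.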
+            */
+            terms.clear();
+            terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+            terms.append( GurobiWrapper::Term( -2.0 / sourceUb * coeffs[1],
+                                               Stringf( "x%u", sourceVariable ) ) );
+            gurobi.addGeqConstraint( terms, -1 );
+        }
+    }
+}
+
+void LPFormulator::addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi,
+                                                                 const Layer *layer,
+                                                                 bool createVariables,
+                                                                 const Vector<double> &coeffs )
+{
+    double slope = layer->getAlpha();
+    double coeff = coeffs[0];
+    for ( unsigned i = 0; i < layer->getSize(); ++i )
+    {
+        if ( !layer->neuronEliminated( i ) )
+        {
+            unsigned targetVariable = layer->neuronToVariable( i );
+
+            List<NeuronIndex> sources = layer->getActivationSources( i );
+            const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer );
+            unsigned sourceNeuron = sources.begin()->_neuron;
+
+            if ( sourceLayer->neuronEliminated( sourceNeuron ) )
+            {
+                // If the source neuron has been eliminated, this neuron is constant.
+                // For LeakyReLU a negative constant maps to slope * value, not 0.
+                double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron );
+                double targetValue = sourceValue > 0 ? sourceValue : slope * sourceValue;
+
+                gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue );
+
+                continue;
+            }
+
+            unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron );
+            double sourceLb = sourceLayer->getLb( sourceNeuron );
+            double sourceUb = sourceLayer->getUb( sourceNeuron );
+
+            String sourceName = Stringf( "x%u", sourceVariable );
+            if ( createVariables && !gurobi.containsVariable( sourceName ) )
+                gurobi.addVariable( sourceName, sourceLb, sourceUb );
+
+            gurobi.addVariable(
+                Stringf( "x%u", targetVariable ), layer->getLb( i ), layer->getUb( i ) );
+
+            if ( !FloatUtils::isNegative( sourceLb ) )
+            {
+                // The LeakyReLU is active, y = x
+
+                List<GurobiWrapper::Term> terms;
+                terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) );
+                gurobi.addEqConstraint( terms, 0 );
+            }
+            else if ( !FloatUtils::isPositive( sourceUb ) )
+            {
+                // The LeakyReLU is inactive, y = alpha * x
+                List<GurobiWrapper::Term> terms;
+                terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                terms.append( GurobiWrapper::Term( -slope, Stringf( "x%u", sourceVariable ) ) );
+                gurobi.addEqConstraint( terms, 0 );
+            }
+            else
+            {
+                double width = sourceUb - sourceLb;
+                double weight = ( sourceUb - slope * sourceLb ) / width;
+                double bias = ( ( slope - 1 ) * sourceUb * sourceLb ) / width;
+
+                /*
+                  The phase of this LeakyReLU is not yet fixed.
+                  For y = LeakyReLU(x), we add the following relaxation:
+                  1. y >= ((1 - alpha) * coeff + alpha) * x (varies continuously between
+                     y >= alpha * x and y >= x).
+                  2. y >= x
+                  3. y >= alpha * x
+                  4. y is below the line that crosses (x.lb, alpha * x.lb) and (x.ub, x.ub)
+                */
+
+                // y >= ((1 - alpha) * coeff + alpha) * x
+                List<GurobiWrapper::Term> terms;
+                terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                terms.append( GurobiWrapper::Term( -slope - ( 1 - slope ) * coeff,
+                                                   Stringf( "x%u", sourceVariable ) ) );
+                gurobi.addGeqConstraint( terms, 0 );
+
+                // y >= x, i.e. y - x >= 0
+                terms.clear();
+                terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) );
+                gurobi.addGeqConstraint( terms, 0 );
+
+                // y >= alpha * x, i.e. y - alpha * x >= 0
+                terms.clear();
+                terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                terms.append( GurobiWrapper::Term( -slope, Stringf( "x%u", sourceVariable ) ) );
+                gurobi.addGeqConstraint( terms, 0 );
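+
+                // The final cut is the chord from (x.lb, alpha * x.lb) to (x.ub, x.ub):
+                // y <= weight * x + bias, with weight and bias as computed above.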
+                terms.clear();
+                terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+                terms.append( GurobiWrapper::Term( -weight, Stringf( "x%u", sourceVariable ) ) );
+                gurobi.addLeqConstraint( terms, bias );
+            }
+        }
+    }
+}
+
+void LPFormulator::addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi,
+                                                                const Layer *layer,
+                                                                bool createVariables,
+                                                                const Vector<double> &coeffs )
+{
+    for ( unsigned i = 0; i < layer->getSize(); ++i )
+    {
+        if ( !layer->neuronEliminated( i ) )
+        {
+            unsigned targetVariable = layer->neuronToVariable( i );
+
+            List<NeuronIndex> sources = layer->getActivationSources( i );
+
+            const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer );
+
+            Vector<double> sourceLbs;
+            Vector<double> sourceUbs;
+            Vector<double> sourceValues;
+            Vector<unsigned> sourceNeurons;
+            bool allConstant = true;
+            for ( const auto &sourceIndex : sources )
+            {
+                unsigned sourceNeuron = sourceIndex._neuron;
+                double sourceLb = sourceLayer->getLb( sourceNeuron );
+                double sourceUb = sourceLayer->getUb( sourceNeuron );
+                String sourceName = Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeuron ) );
+
+                sourceNeurons.append( sourceNeuron );
+                sourceLbs.append( sourceLb );
+                sourceUbs.append( sourceUb );
+
+                if ( createVariables && !gurobi.containsVariable( sourceName ) )
+                    gurobi.addVariable( sourceName, sourceLb, sourceUb );
+
+                if ( !sourceLayer->neuronEliminated( sourceNeuron ) )
+                {
+                    allConstant = false;
+                }
+                else
+                {
+                    double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron );
+                    sourceValues.append( sourceValue );
+                }
+            }
+
+            if ( allConstant )
+            {
+                // If both source neurons have been eliminated, this neuron is constant
+                double targetValue = sourceValues[0] * sourceValues[1];
+                gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue );
+                continue;
+            }
+
+            double lb = FloatUtils::infinity();
+            double ub = FloatUtils::negativeInfinity();
+            List<double> values = { sourceLbs[0] * sourceLbs[1],
+                                    sourceLbs[0] * sourceUbs[1],
+                                    sourceUbs[0] * sourceLbs[1],
+                                    sourceUbs[0] * sourceUbs[1] };
+            for ( const auto &v : values )
+            {
+                if ( v < lb )
+                    lb = v;
+                if ( v > ub )
+                    ub = v;
+            }
+
+            gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub );
+
+            // Bilinear LP relaxation (arXiv:2405.21063v2 [cs.LG])
+            // Lower bound: out >= a_l * x + b_l * y + c_l, where
+            //     a_l = alpha1 * l_y + ( 1 - alpha1 ) * u_y
+            //     b_l = alpha1 * l_x + ( 1 - alpha1 ) * u_x
+            //     c_l = -alpha1 * l_x * l_y - ( 1 - alpha1 ) * u_x * u_y
+
+            // Upper bound: out <= a_u * x + b_u * y + c_u, where
+            //     a_u = alpha2 * u_y + ( 1 - alpha2 ) * l_y
+            //     b_u = alpha2 * l_x + ( 1 - alpha2 ) * u_x
+            //     c_u = -alpha2 * l_x * u_y - ( 1 - alpha2 ) * u_x * l_y
+
+            List<GurobiWrapper::Term> terms;
+            terms.clear();
+            terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) );
+            terms.append( GurobiWrapper::Term(
+                -coeffs[0] * sourceLbs[1] - ( 1 - coeffs[0] ) * sourceUbs[1],
+                Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) );
+            terms.append( GurobiWrapper::Term(
+                -coeffs[0] * sourceLbs[0] - ( 1 - coeffs[0] ) * sourceUbs[0],
+                Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) );
+            gurobi.addGeqConstraint( terms,
+                                     -coeffs[0] * sourceLbs[0] * sourceLbs[1] -
+                                         ( 1 - coeffs[0] ) * sourceUbs[0] * sourceUbs[1] );
+
+            terms.clear();
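+            // Upper-bound plane, parameterised by alpha2 = coeffs[1]: at coeffs[1] = 1
+            // this is the McCormick cut out <= u_y * x + l_x * y - l_x * u_y, and at
+            // coeffs[1] = 0 it is out <= l_y * x + u_x * y - u_x * l_y.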
terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( + -coeffs[1] * sourceUbs[1] - ( 1 - coeffs[1] ) * sourceLbs[1], + Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); + terms.append( GurobiWrapper::Term( + -coeffs[1] * sourceLbs[0] - ( 1 - coeffs[1] ) * sourceUbs[0], + Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); + gurobi.addLeqConstraint( terms, + -coeffs[1] * sourceLbs[0] * sourceUbs[1] - + ( 1 - coeffs[1] ) * sourceUbs[0] * sourceLbs[1] ); + } + } +} + +void LPFormulator::addPolyognalTighteningsToLpRelaxation( + GurobiWrapper &gurobi, + const Map &layers, + unsigned firstLayer, + unsigned lastLayer, + const Vector &polygonalTightenings ) +{ + List terms; + for ( const auto &tightening : polygonalTightenings ) + { + Map neuronToCoefficient = tightening._neuronToCoefficient; + PolygonalTightening::PolygonalBoundType type = tightening._type; + double value = tightening._value; + + bool outOfBounds = false; + for ( const auto &pair : neuronToCoefficient ) + { + unsigned currentLayerIndex = pair.first._layer; + if ( currentLayerIndex < firstLayer || currentLayerIndex > lastLayer ) + { + outOfBounds = true; + } + } + if ( outOfBounds ) + { + continue; + } + + terms.clear(); + for ( const auto &pair : neuronToCoefficient ) + { + unsigned currentLayerIndex = pair.first._layer; + unsigned i = pair.first._neuron; + double coeff = pair.second; + Layer *layer = layers[currentLayerIndex]; + + if ( !layer->neuronEliminated( i ) ) + { + unsigned variable = layer->neuronToVariable( i ); + String variableName = Stringf( "x%u", variable ); + if ( !gurobi.containsVariable( variableName ) ) + { + gurobi.addVariable( variableName, layer->getLb( i ), layer->getUb( i ) ); + } + terms.append( GurobiWrapper::Term( coeff, Stringf( "x%u", variable ) ) ); + } + else + { + value -= coeff * layer->getEliminatedNeuronValue( i ); + } + } + + if ( type == PolygonalTightening::UB ) + { + gurobi.addLeqConstraint( terms, value ); + } + else + { + gurobi.addGeqConstraint( terms, value ); + } + } +} + +void LPFormulator::setCutoff( double cutoff ) +{ + _cutoffInUse = true; + _cutoffValue = cutoff; +} + +} // namespace NLR diff --git a/src/Layer.cpp b/src/Layer.cpp new file mode 100644 index 0000000000..43f24e341b --- /dev/null +++ b/src/Layer.cpp @@ -0,0 +1,5162 @@ +/********************* */ +/*! \file Layer.cpp + ** \verbatim + ** Top contributors (to current version): + ** Guy Katz, Ido Shmuel + ** This file is part of the Marabou project. + ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS + ** in the top-level source directory) and their institutional affiliations. + ** All rights reserved. 
See the file COPYING in the top-level source + ** directory for licensing information.\endverbatim + ** + ** [[ Add lengthier description here ]] + + **/ + +#include "Layer.h" + +#include "Options.h" +#include "Query.h" +#include "SoftmaxConstraint.h" +#include "SymbolicBoundTighteningType.h" + +namespace NLR { + +Layer::~Layer() +{ + freeMemoryIfNeeded(); +} + +void Layer::setLayerOwner( LayerOwner *layerOwner ) +{ + _layerOwner = layerOwner; +} + +Layer::Layer( unsigned index, Type type, unsigned size, LayerOwner *layerOwner ) + : _layerIndex( index ) + , _type( type ) + , _size( size ) + , _layerOwner( layerOwner ) + , _bias( NULL ) + , _assignment( NULL ) + , _lb( NULL ) + , _ub( NULL ) + , _inputLayerSize( 0 ) + , _symbolicLb( NULL ) + , _symbolicUb( NULL ) + , _symbolicLowerBias( NULL ) + , _symbolicUpperBias( NULL ) + , _symbolicLbOfLb( NULL ) + , _symbolicUbOfLb( NULL ) + , _symbolicLbOfUb( NULL ) + , _symbolicUbOfUb( NULL ) +{ + allocateMemory(); +} + +void Layer::allocateMemory() +{ + if ( _type == WEIGHTED_SUM ) + { + _bias = new double[_size]; + std::fill_n( _bias, _size, 0 ); + } + + _lb = new double[_size]; + _ub = new double[_size]; + + std::fill_n( _lb, _size, 0 ); + std::fill_n( _ub, _size, 0 ); + + _assignment = new double[_size]; + + _simulations.assign( + _size, Vector( Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ) ) ); + + _inputLayerSize = ( _type == INPUT ) ? _size : _layerOwner->getLayer( 0 )->getSize(); + if ( Options::get()->getSymbolicBoundTighteningType() == + SymbolicBoundTighteningType::SYMBOLIC_BOUND_TIGHTENING || + Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX || + Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP || + Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM || + Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT || + Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS ) + { + _symbolicLb = new double[_size * _inputLayerSize]; + _symbolicUb = new double[_size * _inputLayerSize]; + + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); + std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); + + _symbolicLowerBias = new double[_size]; + _symbolicUpperBias = new double[_size]; + + std::fill_n( _symbolicLowerBias, _size, 0 ); + std::fill_n( _symbolicUpperBias, _size, 0 ); + + _symbolicLbOfLb = new double[_size]; + _symbolicUbOfLb = new double[_size]; + _symbolicLbOfUb = new double[_size]; + _symbolicUbOfUb = new double[_size]; + + std::fill_n( _symbolicLbOfLb, _size, 0 ); + std::fill_n( _symbolicUbOfLb, _size, 0 ); + std::fill_n( _symbolicLbOfUb, _size, 0 ); + std::fill_n( _symbolicUbOfUb, _size, 0 ); + } +} + +void Layer::setAssignment( const double *values ) +{ + ASSERT( _eliminatedNeurons.empty() ); + memcpy( _assignment, values, _size * sizeof( double ) ); +} + +const double *Layer::getAssignment() const +{ + return _assignment; +} + +double Layer::getAssignment( unsigned neuron ) const +{ + return _assignment[neuron]; +} + +void Layer::setSimulations( const Vector> *values ) +{ + _simulations = *values; +} + +const Vector> *Layer::getSimulations() const +{ + return &_simulations; +} + +void Layer::computeAssignment() +{ + ASSERT( _type != INPUT ); + + if ( _type == WEIGHTED_SUM ) + { + // Initialize to 
bias + memcpy( _assignment, _bias, sizeof( double ) * _size ); + + // Process each of the source layers + for ( auto &sourceLayerEntry : _sourceLayers ) + { + const Layer *sourceLayer = _layerOwner->getLayer( sourceLayerEntry.first ); + const double *sourceAssignment = sourceLayer->getAssignment(); + unsigned sourceSize = sourceLayerEntry.second; + const double *weights = _layerToWeights[sourceLayerEntry.first]; + + for ( unsigned i = 0; i < sourceSize; ++i ) + for ( unsigned j = 0; j < _size; ++j ) + _assignment[j] += ( sourceAssignment[i] * weights[i * _size + j] ); + } + } + + else if ( _type == RELU ) + { + for ( unsigned i = 0; i < _size; ++i ) + { + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + double inputValue = + _layerOwner->getLayer( sourceIndex._layer )->getAssignment( sourceIndex._neuron ); + + _assignment[i] = FloatUtils::max( inputValue, 0 ); + } + } + + else if ( _type == ROUND ) + { + for ( unsigned i = 0; i < _size; ++i ) + { + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + double inputValue = + _layerOwner->getLayer( sourceIndex._layer )->getAssignment( sourceIndex._neuron ); + + _assignment[i] = FloatUtils::round( inputValue ); + } + } + + else if ( _type == LEAKY_RELU ) + { + for ( unsigned i = 0; i < _size; ++i ) + { + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + double inputValue = + _layerOwner->getLayer( sourceIndex._layer )->getAssignment( sourceIndex._neuron ); + ASSERT( _alpha > 0 && _alpha < 1 ); + _assignment[i] = FloatUtils::max( inputValue, _alpha * inputValue ); + } + } + + else if ( _type == ABSOLUTE_VALUE ) + { + for ( unsigned i = 0; i < _size; ++i ) + { + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + double inputValue = + _layerOwner->getLayer( sourceIndex._layer )->getAssignment( sourceIndex._neuron ); + + _assignment[i] = FloatUtils::abs( inputValue ); + } + } + + else if ( _type == MAX ) + { + for ( unsigned i = 0; i < _size; ++i ) + { + _assignment[i] = FloatUtils::negativeInfinity(); + + for ( const auto &input : _neuronToActivationSources[i] ) + { + double value = + _layerOwner->getLayer( input._layer )->getAssignment( input._neuron ); + if ( value > _assignment[i] ) + _assignment[i] = value; + } + } + } + + else if ( _type == SIGN ) + { + for ( unsigned i = 0; i < _size; ++i ) + { + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + double inputValue = + _layerOwner->getLayer( sourceIndex._layer )->getAssignment( sourceIndex._neuron ); + + _assignment[i] = FloatUtils::isNegative( inputValue ) ? 
-1 : 1; + } + } + + else if ( _type == SIGMOID ) + { + for ( unsigned i = 0; i < _size; ++i ) + { + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + double inputValue = + _layerOwner->getLayer( sourceIndex._layer )->getAssignment( sourceIndex._neuron ); + + _assignment[i] = 1 / ( 1 + std::exp( -inputValue ) ); + } + } + + else if ( _type == SOFTMAX ) + { + for ( unsigned i = 0; i < _size; ++i ) + { + _assignment[i] = FloatUtils::negativeInfinity(); + + Vector inputs; + Vector outputs; + unsigned outputIndex = 0; + unsigned index = 0; + for ( const auto &input : _neuronToActivationSources[i] ) + { + if ( input._neuron == i ) + outputIndex = index; + double value = + _layerOwner->getLayer( input._layer )->getAssignment( input._neuron ); + inputs.append( value ); + ++index; + } + + SoftmaxConstraint::softmax( inputs, outputs ); + _assignment[i] = outputs[outputIndex]; + } + } + + else if ( _type == BILINEAR ) + { + for ( unsigned i = 0; i < _size; ++i ) + { + _assignment[i] = 1; + for ( const auto &input : _neuronToActivationSources[i] ) + { + double value = + _layerOwner->getLayer( input._layer )->getAssignment( input._neuron ); + _assignment[i] *= value; + } + } + } + else + { + printf( "Error! Neuron type %u unsupported\n", _type ); + throw MarabouError( MarabouError::NETWORK_LEVEL_REASONER_ACTIVATION_NOT_SUPPORTED ); + } + + // Eliminated variables supersede anything else - no matter what + // was computed due to left-over weights, etc, their set values + // prevail. + for ( const auto &eliminated : _eliminatedNeurons ) + _assignment[eliminated.first] = eliminated.second; +} + +void Layer::computeSimulations() +{ + ASSERT( _type != INPUT ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + if ( _type == WEIGHTED_SUM ) + { + // Process each of the source layers + for ( auto &sourceLayerEntry : _sourceLayers ) + { + const Layer *sourceLayer = _layerOwner->getLayer( sourceLayerEntry.first ); + const Vector> *sourceSimulations = sourceLayer->getSimulations(); + + unsigned sourceSize = sourceLayerEntry.second; + const double *weights = _layerToWeights[sourceLayerEntry.first]; + + for ( unsigned i = 0; i < _size; ++i ) + { + for ( unsigned j = 0; j < simulationSize; ++j ) + _simulations[i][j] = _bias[i]; + } + + for ( unsigned i = 0; i < sourceSize; ++i ) + for ( unsigned j = 0; j < simulationSize; ++j ) + for ( unsigned k = 0; k < _size; ++k ) + _simulations[k][j] += + ( ( *sourceSimulations ).get( i ).get( j ) * weights[i * _size + k] ); + } + } + else if ( _type == RELU ) + { + for ( unsigned i = 0; i < _size; ++i ) + { + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Vector &simulations = + ( *( _layerOwner->getLayer( sourceIndex._layer )->getSimulations() ) ) + .get( sourceIndex._neuron ); + for ( unsigned j = 0; j < simulationSize; ++j ) + _simulations[i][j] = FloatUtils::max( simulations.get( j ), 0 ); + } + } + else if ( _type == LEAKY_RELU ) + { + for ( unsigned i = 0; i < _size; ++i ) + { + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Vector &simulations = + ( *( _layerOwner->getLayer( sourceIndex._layer )->getSimulations() ) ) + .get( sourceIndex._neuron ); + ASSERT( _alpha > 0 && _alpha < 1 ); + for ( unsigned j = 0; j < simulationSize; ++j ) + _simulations[i][j] = + FloatUtils::max( simulations.get( j ), _alpha * simulations.get( j ) ); + } + } + else if ( _type == ABSOLUTE_VALUE ) + { + for ( unsigned i = 0; i < _size; ++i ) + { + NeuronIndex sourceIndex = 
                *_neuronToActivationSources[i].begin();
+            const Vector<double> &simulations =
+                ( *( _layerOwner->getLayer( sourceIndex._layer )->getSimulations() ) )
+                    .get( sourceIndex._neuron );
+            for ( unsigned j = 0; j < simulationSize; ++j )
+                _simulations[i][j] = FloatUtils::abs( simulations.get( j ) );
+        }
+    }
+    else if ( _type == MAX )
+    {
+        for ( unsigned i = 0; i < _size; ++i )
+        {
+            for ( unsigned j = 0; j < simulationSize; ++j )
+            {
+                _simulations[i][j] = FloatUtils::negativeInfinity();
+
+                for ( const auto &input : _neuronToActivationSources[i] )
+                {
+                    double value = ( *( _layerOwner->getLayer( input._layer )->getSimulations() ) )
+                                       .get( input._neuron )
+                                       .get( j );
+                    if ( value > _simulations[i][j] )
+                        _simulations[i][j] = value;
+                }
+            }
+        }
+    }
+    else if ( _type == SIGN )
+    {
+        for ( unsigned i = 0; i < _size; ++i )
+        {
+            NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin();
+            const Vector<double> &simulations =
+                ( *( _layerOwner->getLayer( sourceIndex._layer )->getSimulations() ) )
+                    .get( sourceIndex._neuron );
+            for ( unsigned j = 0; j < simulationSize; ++j )
+                _simulations[i][j] = FloatUtils::isNegative( simulations.get( j ) ) ? -1 : 1;
+        }
+    }
+    else if ( _type == SIGMOID )
+    {
+        for ( unsigned i = 0; i < _size; ++i )
+        {
+            NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin();
+            const Vector<double> &simulations =
+                ( *( _layerOwner->getLayer( sourceIndex._layer )->getSimulations() ) )
+                    .get( sourceIndex._neuron );
+            for ( unsigned j = 0; j < simulationSize; ++j )
+                _simulations[i][j] = 1 / ( 1 + std::exp( -simulations.get( j ) ) );
+        }
+    }
+    else if ( _type == ROUND )
+    {
+        for ( unsigned i = 0; i < _size; ++i )
+        {
+            NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin();
+            const Vector<double> &simulations =
+                ( *( _layerOwner->getLayer( sourceIndex._layer )->getSimulations() ) )
+                    .get( sourceIndex._neuron );
+            for ( unsigned j = 0; j < simulationSize; ++j )
+                _simulations[i][j] = FloatUtils::round( simulations.get( j ) );
+        }
+    }
+    else if ( _type == SOFTMAX )
+    {
+        for ( unsigned i = 0; i < _size; ++i )
+        {
+            for ( unsigned j = 0; j < simulationSize; ++j )
+            {
+                _simulations[i][j] = FloatUtils::negativeInfinity();
+
+                Vector<double> inputs;
+                Vector<double> outputs;
+                unsigned outputIndex = 0;
+                unsigned index = 0;
+
+                for ( const auto &input : _neuronToActivationSources[i] )
+                {
+                    if ( input._neuron == i )
+                        outputIndex = index;
+                    double value = ( *( _layerOwner->getLayer( input._layer )->getSimulations() ) )
+                                       .get( input._neuron )
+                                       .get( j );
+                    inputs.append( value );
+                    ++index;
+                }
+
+                SoftmaxConstraint::softmax( inputs, outputs );
+                _simulations[i][j] = outputs[outputIndex];
+            }
+        }
+    }
+    else if ( _type == BILINEAR )
+    {
+        for ( unsigned i = 0; i < _size; ++i )
+        {
+            for ( unsigned j = 0; j < simulationSize; ++j )
+            {
+                _simulations[i][j] = 1;
+                for ( const auto &input : _neuronToActivationSources[i] )
+                {
+                    double value = ( *( _layerOwner->getLayer( input._layer )->getSimulations() ) )
+                                       .get( input._neuron )
+                                       .get( j );
+                    _simulations[i][j] *= value;
+                }
+            }
+        }
+    }
+    else
+    {
+        printf( "Error! Neuron type %u unsupported\n", _type );
+        throw MarabouError( MarabouError::NETWORK_LEVEL_REASONER_ACTIVATION_NOT_SUPPORTED );
+    }
+
+    // Eliminated variables supersede anything else - no matter what
+    // was computed due to left-over weights, etc, their set values
+    // prevail.
+ for ( const auto &eliminated : _eliminatedNeurons ) + { + for ( unsigned j = 0; j < simulationSize; ++j ) + _simulations[eliminated.first][j] = eliminated.second; + } +} + +void Layer::addSourceLayer( unsigned layerNumber, unsigned layerSize ) +{ + ASSERT( _type != INPUT ); + + if ( _sourceLayers.exists( layerNumber ) ) + return; + + _sourceLayers[layerNumber] = layerSize; + + if ( _type == WEIGHTED_SUM ) + { + _layerToWeights[layerNumber] = new double[layerSize * _size]; + _layerToPositiveWeights[layerNumber] = new double[layerSize * _size]; + _layerToNegativeWeights[layerNumber] = new double[layerSize * _size]; + + std::fill_n( _layerToWeights[layerNumber], layerSize * _size, 0 ); + std::fill_n( _layerToPositiveWeights[layerNumber], layerSize * _size, 0 ); + std::fill_n( _layerToNegativeWeights[layerNumber], layerSize * _size, 0 ); + } +} + +const Map &Layer::getSourceLayers() const +{ + return _sourceLayers; +} + +void Layer::addSuccessorLayer( unsigned layerNumber ) +{ + _successorLayers.insert( layerNumber ); +} + +const Set &Layer::getSuccessorLayers() const +{ + return _successorLayers; +} + +const double *Layer::getWeightMatrix( unsigned sourceLayer ) const +{ + ASSERT( _layerToWeights.exists( sourceLayer ) ); + return _layerToWeights[sourceLayer]; +} + +void Layer::removeSourceLayer( unsigned sourceLayer ) +{ + ASSERT( _sourceLayers.exists( sourceLayer ) ); + + delete[] _layerToWeights[sourceLayer]; + delete[] _layerToPositiveWeights[sourceLayer]; + delete[] _layerToNegativeWeights[sourceLayer]; + + _sourceLayers.erase( sourceLayer ); + _layerToWeights.erase( sourceLayer ); + _layerToPositiveWeights.erase( sourceLayer ); + _layerToNegativeWeights.erase( sourceLayer ); +} + +void Layer::setWeight( unsigned sourceLayer, + unsigned sourceNeuron, + unsigned targetNeuron, + double weight ) +{ + unsigned index = sourceNeuron * _size + targetNeuron; + _layerToWeights[sourceLayer][index] = weight; + + if ( weight > 0 ) + { + _layerToPositiveWeights[sourceLayer][index] = weight; + _layerToNegativeWeights[sourceLayer][index] = 0; + } + else + { + _layerToPositiveWeights[sourceLayer][index] = 0; + _layerToNegativeWeights[sourceLayer][index] = weight; + } +} + +double Layer::getWeight( unsigned sourceLayer, unsigned sourceNeuron, unsigned targetNeuron ) const +{ + unsigned index = sourceNeuron * _size + targetNeuron; + return _layerToWeights[sourceLayer][index]; +} + +double *Layer::getWeights( unsigned sourceLayerIndex ) const +{ + return _layerToWeights[sourceLayerIndex]; +} + +double *Layer::getPositiveWeights( unsigned sourceLayerIndex ) const +{ + return _layerToPositiveWeights[sourceLayerIndex]; +} + +double *Layer::getNegativeWeights( unsigned sourceLayerIndex ) const +{ + return _layerToNegativeWeights[sourceLayerIndex]; +} + +void Layer::setBias( unsigned neuron, double bias ) +{ + _bias[neuron] = bias; +} + +double Layer::getBias( unsigned neuron ) const +{ + return _bias[neuron]; +} + +double *Layer::getBiases() const +{ + return _bias; +} + +void Layer::addActivationSource( unsigned sourceLayer, + unsigned sourceNeuron, + unsigned targetNeuron ) +{ + ASSERT( _type == RELU || _type == LEAKY_RELU || _type == ABSOLUTE_VALUE || _type == MAX || + _type == ROUND || _type == SIGN || _type == SIGMOID || _type == SOFTMAX || + _type == BILINEAR ); + + if ( !_neuronToActivationSources.exists( targetNeuron ) ) + _neuronToActivationSources[targetNeuron] = List(); + + _neuronToActivationSources[targetNeuron].append( NeuronIndex( sourceLayer, sourceNeuron ) ); + + DEBUG( { + if ( _type == RELU 
|| _type == LEAKY_RELU || _type == ABSOLUTE_VALUE || _type == SIGN || + _type == ROUND ) + ASSERT( _neuronToActivationSources[targetNeuron].size() == 1 ); + } ); +} + +List Layer::getActivationSources( unsigned neuron ) const +{ + return _neuronToActivationSources[neuron]; +} + +unsigned Layer::getSize() const +{ + return _size; +} + +Layer::Type Layer::getLayerType() const +{ + return _type; +} + +void Layer::setNeuronVariable( unsigned neuron, unsigned variable ) +{ + ASSERT( !_eliminatedNeurons.exists( neuron ) ); + + _neuronToVariable[neuron] = variable; + _variableToNeuron[variable] = neuron; +} + +void Layer::obtainCurrentBounds( const Query &inputQuery ) +{ + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _neuronToVariable.exists( i ) ) + { + unsigned variable = _neuronToVariable[i]; + _lb[i] = inputQuery.getLowerBound( variable ); + _ub[i] = inputQuery.getUpperBound( variable ); + } + else + { + ASSERT( _eliminatedNeurons.exists( i ) ); + _lb[i] = _eliminatedNeurons[i]; + _ub[i] = _eliminatedNeurons[i]; + } + } +} + +void Layer::obtainCurrentBounds() +{ + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _neuronToVariable.exists( i ) ) + { + unsigned variable = _neuronToVariable[i]; + _lb[i] = _layerOwner->getTableau()->getLowerBound( variable ); + _ub[i] = _layerOwner->getTableau()->getUpperBound( variable ); + } + else + { + ASSERT( _eliminatedNeurons.exists( i ) ); + _lb[i] = _eliminatedNeurons[i]; + _ub[i] = _eliminatedNeurons[i]; + } + } +} + +double Layer::getLb( unsigned neuron ) const +{ + if ( _eliminatedNeurons.exists( neuron ) ) + return _eliminatedNeurons[neuron]; + + return _lb[neuron]; +} + +double Layer::getUb( unsigned neuron ) const +{ + if ( _eliminatedNeurons.exists( neuron ) ) + return _eliminatedNeurons[neuron]; + + return _ub[neuron]; +} + +double *Layer::getLbs() const +{ + return _lb; +} + +double *Layer::getUbs() const +{ + return _ub; +} + +void Layer::setLb( unsigned neuron, double bound ) +{ + ASSERT( !_eliminatedNeurons.exists( neuron ) ); + _lb[neuron] = bound; +} + +void Layer::setUb( unsigned neuron, double bound ) +{ + ASSERT( !_eliminatedNeurons.exists( neuron ) ); + _ub[neuron] = bound; +} + +void Layer::computeIntervalArithmeticBounds() +{ + ASSERT( _type != INPUT ); + + switch ( _type ) + { + case WEIGHTED_SUM: + computeIntervalArithmeticBoundsForWeightedSum(); + break; + + case RELU: + computeIntervalArithmeticBoundsForRelu(); + break; + + case ABSOLUTE_VALUE: + computeIntervalArithmeticBoundsForAbs(); + break; + + case SIGN: + computeIntervalArithmeticBoundsForSign(); + break; + + case ROUND: + computeIntervalArithmeticBoundsForRound(); + break; + + case LEAKY_RELU: + computeIntervalArithmeticBoundsForLeakyRelu(); + break; + + case SIGMOID: + computeIntervalArithmeticBoundsForSigmoid(); + break; + + case MAX: + computeIntervalArithmeticBoundsForMax(); + break; + + case SOFTMAX: + computeIntervalArithmeticBoundsForSoftmax(); + break; + + case BILINEAR: + computeIntervalArithmeticBoundsForBilinear(); + break; + + default: + printf( "Error! 
Activation type %u unsupported\n", _type ); + throw MarabouError( MarabouError::NETWORK_LEVEL_REASONER_ACTIVATION_NOT_SUPPORTED ); + break; + } +} + +void Layer::computeIntervalArithmeticBoundsForWeightedSum() +{ + double *newLb = new double[_size]; + double *newUb = new double[_size]; + + for ( unsigned i = 0; i < _size; ++i ) + { + newLb[i] = _bias[i]; + newUb[i] = _bias[i]; + } + + for ( const auto &sourceLayerEntry : _sourceLayers ) + { + unsigned sourceLayerIndex = sourceLayerEntry.first; + unsigned sourceLayerSize = sourceLayerEntry.second; + const Layer *sourceLayer = _layerOwner->getLayer( sourceLayerIndex ); + const double *weights = _layerToWeights[sourceLayerIndex]; + + for ( unsigned i = 0; i < _size; ++i ) + { + for ( unsigned j = 0; j < sourceLayerSize; ++j ) + { + double previousLb = sourceLayer->getLb( j ); + double previousUb = sourceLayer->getUb( j ); + double weight = weights[j * _size + i]; + + if ( weight > 0 ) + { + newLb[i] += weight * previousLb; + newUb[i] += weight * previousUb; + } + else + { + newLb[i] += weight * previousUb; + newUb[i] += weight * previousLb; + } + } + } + } + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + if ( _lb[i] < newLb[i] ) + { + _lb[i] = newLb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + if ( _ub[i] > newUb[i] ) + { + _ub[i] = newUb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } + + delete[] newLb; + delete[] newUb; +} + +void Layer::computeIntervalArithmeticBoundsForRelu() +{ + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + + double lb = sourceLayer->getLb( sourceIndex._neuron ); + double ub = sourceLayer->getUb( sourceIndex._neuron ); + + if ( lb < 0 ) + lb = 0; + + if ( _lb[i] < lb ) + { + _lb[i] = lb; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + if ( _ub[i] > ub ) + { + _ub[i] = ub; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } +} + +void Layer::computeIntervalArithmeticBoundsForAbs() +{ + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + + double lb = sourceLayer->getLb( sourceIndex._neuron ); + double ub = sourceLayer->getUb( sourceIndex._neuron ); + + if ( lb > 0 ) + { + if ( _lb[i] < lb ) + { + _lb[i] = lb; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + if ( _ub[i] > ub ) + { + _ub[i] = ub; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } + else if ( ub < 0 ) + { + if ( _lb[i] < -ub ) + { + _lb[i] = -ub; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + if ( _ub[i] > -lb ) + { + _ub[i] = -lb; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } + else + { + // lb < 0 < ub + if ( _lb[i] < 0 ) + { + _lb[i] = 0; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( 
_ub[i] > FloatUtils::max( ub, -lb ) )
+            {
+                _ub[i] = FloatUtils::max( ub, -lb );
+                _layerOwner->receiveTighterBound(
+                    Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
+            }
+        }
+    }
+}
+
+void Layer::computeIntervalArithmeticBoundsForSign()
+{
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+            continue;
+
+        NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin();
+        const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
+
+        double lb = sourceLayer->getLb( sourceIndex._neuron );
+        double ub = sourceLayer->getUb( sourceIndex._neuron );
+
+        double newLb;
+        double newUb;
+
+        if ( !FloatUtils::isNegative( lb ) )
+        {
+            newLb = 1;
+            newUb = 1;
+        }
+        else if ( FloatUtils::isNegative( ub ) )
+        {
+            newLb = -1;
+            newUb = -1;
+        }
+        else
+        {
+            newLb = -1;
+            newUb = 1;
+        }
+
+        /*
+          We now have the tightest bounds we can for the sign
+          variable. If they are tighter than what was previously
+          known, store them.
+        */
+        if ( _lb[i] < newLb )
+        {
+            _lb[i] = newLb;
+            _layerOwner->receiveTighterBound(
+                Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
+        }
+
+        if ( _ub[i] > newUb )
+        {
+            _ub[i] = newUb;
+            _layerOwner->receiveTighterBound(
+                Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
+        }
+    }
+}
+
+void Layer::computeIntervalArithmeticBoundsForLeakyRelu()
+{
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+            continue;
+
+        NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin();
+        const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
+
+        double lb = sourceLayer->getLb( sourceIndex._neuron );
+        double ub = sourceLayer->getUb( sourceIndex._neuron );
+
+        if ( lb > 0 )
+        {
+            if ( _lb[i] < lb )
+            {
+                _lb[i] = lb;
+                _layerOwner->receiveTighterBound(
+                    Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
+            }
+            if ( _ub[i] > ub )
+            {
+                _ub[i] = ub;
+                _layerOwner->receiveTighterBound(
+                    Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
+            }
+        }
+        else if ( ub < 0 )
+        {
+            if ( _lb[i] < _alpha * lb )
+            {
+                _lb[i] = _alpha * lb;
+                _layerOwner->receiveTighterBound(
+                    Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
+            }
+            if ( _ub[i] > _alpha * ub )
+            {
+                _ub[i] = _alpha * ub;
+                _layerOwner->receiveTighterBound(
+                    Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
+            }
+        }
+        else
+        {
+            // lb < 0 < ub
+            if ( _lb[i] < _alpha * lb )
+            {
+                _lb[i] = _alpha * lb;
+                _layerOwner->receiveTighterBound(
+                    Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
+            }
+
+            if ( _ub[i] > ub )
+            {
+                _ub[i] = ub;
+                _layerOwner->receiveTighterBound(
+                    Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
+            }
+        }
+    }
+}
+
+void Layer::computeIntervalArithmeticBoundsForSigmoid()
+{
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+            continue;
+
+        NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin();
+        const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
+
+        double lb = sourceLayer->getLb( sourceIndex._neuron );
+        double ub = sourceLayer->getUb( sourceIndex._neuron );
+
+        double lbSigmoid = SigmoidConstraint::sigmoid( lb );
+        double ubSigmoid = SigmoidConstraint::sigmoid( ub );
+
+        if ( _lb[i] < lbSigmoid )
+        {
+            _lb[i] = lbSigmoid;
+            _layerOwner->receiveTighterBound(
+                Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
+        }
+        if ( _ub[i] > ubSigmoid )
+        {
+            _ub[i] = ubSigmoid;
+            _layerOwner->receiveTighterBound(
+                Tightening( _neuronToVariable[i],
_ub[i], Tightening::UB ) ); + } + } +} + +void Layer::computeIntervalArithmeticBoundsForRound() +{ + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + + double lb = sourceLayer->getLb( sourceIndex._neuron ); + double ub = sourceLayer->getUb( sourceIndex._neuron ); + + double lbRound = FloatUtils::round( lb ); + double ubRound = FloatUtils::round( ub ); + + + if ( _lb[i] < lbRound ) + { + _lb[i] = lbRound; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + if ( _ub[i] > ubRound ) + { + _ub[i] = ubRound; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } +} + +void Layer::computeIntervalArithmeticBoundsForMax() +{ + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + ASSERT( _neuronToActivationSources.exists( i ) ); + List sources = getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + + NeuronIndex indexOfMaxLowerBound = *( sources.begin() ); + double maxLowerBound = FloatUtils::negativeInfinity(); + double maxUpperBound = FloatUtils::negativeInfinity(); + + Map sourceLbs; + Map sourceUbs; + unsigned counter = 0; + for ( const auto &sourceIndex : sources ) + { + unsigned sourceNeuron = sourceIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + + sourceLbs[sourceIndex] = sourceLb; + sourceUbs[sourceIndex] = sourceUb; + + if ( maxLowerBound < sourceLb ) + { + indexOfMaxLowerBound = sourceIndex; + maxLowerBound = sourceLb; + } + if ( maxUpperBound < sourceUb ) + { + maxUpperBound = sourceUb; + } + ++counter; + } + + // The phase is fixed if the lower-bound of a source variable x_b is + // larger than the upper-bounds of the other source variables. 
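+            // For example (hypothetical values), with sources x1 in [2, 3] and
+            // x2 in [0, 1], lb(x1) = 2 exceeds ub(x2) = 1, so the max is fixed to
+            // x1 and x1's bounds can be adopted for the target neuron directly.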
+ bool phaseFixed = true; + for ( const auto &sourceIndex : sources ) + { + if ( sourceIndex != indexOfMaxLowerBound && + FloatUtils::gt( sourceUbs[sourceIndex], maxLowerBound ) ) + { + phaseFixed = false; + break; + } + } + + if ( phaseFixed ) + { + // Phase fixed + // Concrete bound: lb_b <= x_f <= ub_b + if ( _lb[i] < maxLowerBound ) + { + _lb[i] = maxLowerBound; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > sourceUbs[indexOfMaxLowerBound] ) + { + _ub[i] = sourceUbs[indexOfMaxLowerBound]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } + else + { + // MaxPool not fixed + // Concrete bounds: lb_b <= x_f <= maxUpperBound + if ( _lb[i] < maxLowerBound ) + { + _lb[i] = maxLowerBound; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > maxUpperBound ) + { + _ub[i] = maxUpperBound; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } + } +} + +void Layer::computeIntervalArithmeticBoundsForSoftmax() +{ + Set handledInputNeurons; + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + ASSERT( _neuronToActivationSources.exists( i ) ); + List sources = getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + + ASSERT( sourceLayer->getSize() == _size ); + + Vector sourceLbs; + Vector sourceUbs; + for ( const auto &sourceIndex : sources ) + { + unsigned sourceNeuron = sourceIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + + sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + } + + // Find the index of i in the softmax + unsigned index = 0; + for ( const auto &sourceIndex : sources ) + { + if ( handledInputNeurons.exists( sourceIndex._neuron ) ) + ++index; + else + { + handledInputNeurons.insert( sourceIndex._neuron ); + break; + } + } + + double lb = linearLowerBound( sourceLbs, sourceUbs, index ); + double ub = linearUpperBound( sourceLbs, sourceUbs, index ); + if ( _lb[i] < lb ) + { + _lb[i] = lb; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + if ( _ub[i] > ub ) + { + _ub[i] = ub; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } +} + +void Layer::computeIntervalArithmeticBoundsForBilinear() +{ + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + ASSERT( _neuronToActivationSources.exists( i ) ); + List sources = getActivationSources( i ); + ASSERT( sources.size() == 2 ); + + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + + Vector sourceLbs; + Vector sourceUbs; + Vector sourceValues; + bool allConstant = true; + for ( const auto &sourceIndex : sources ) + { + unsigned sourceNeuron = sourceIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + + sourceLbs.append( sourceLb ); + sourceUbs.append( sourceUb ); + + if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) + { + allConstant = false; + } + else + { + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + 
sourceValues.append( sourceValue );
+            }
+        }
+
+        if ( allConstant )
+        {
+            // If both source neurons have been eliminated, this neuron is constant
+            double value = sourceValues[0] * sourceValues[1];
+
+            if ( _lb[i] < value )
+            {
+                _lb[i] = value;
+                _layerOwner->receiveTighterBound(
+                    Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
+            }
+
+            if ( _ub[i] > value )
+            {
+                _ub[i] = value;
+                _layerOwner->receiveTighterBound(
+                    Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
+            }
+            continue;
+        }
+
+        double lb = FloatUtils::infinity();
+        double ub = FloatUtils::negativeInfinity();
+        List<double> values = { sourceLbs[0] * sourceLbs[1],
+                                sourceLbs[0] * sourceUbs[1],
+                                sourceUbs[0] * sourceLbs[1],
+                                sourceUbs[0] * sourceUbs[1] };
+        for ( const auto &v : values )
+        {
+            if ( v < lb )
+                lb = v;
+            if ( v > ub )
+                ub = v;
+        }
+
+        if ( _lb[i] < lb )
+        {
+            _lb[i] = lb;
+            _layerOwner->receiveTighterBound(
+                Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
+        }
+
+        if ( _ub[i] > ub )
+        {
+            _ub[i] = ub;
+            _layerOwner->receiveTighterBound(
+                Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
+        }
+    }
+}
+
+void Layer::computeSymbolicBounds()
+{
+    switch ( _type )
+    {
+    case INPUT:
+        computeSymbolicBoundsForInput();
+        break;
+
+    case WEIGHTED_SUM:
+        computeSymbolicBoundsForWeightedSum();
+        break;
+
+    case RELU:
+        computeSymbolicBoundsForRelu();
+        break;
+
+    case SIGN:
+        computeSymbolicBoundsForSign();
+        break;
+
+    case ABSOLUTE_VALUE:
+        computeSymbolicBoundsForAbsoluteValue();
+        break;
+
+    case LEAKY_RELU:
+        computeSymbolicBoundsForLeakyRelu();
+        break;
+
+    case ROUND:
+        computeSymbolicBoundsForRound();
+        break;
+
+    case SIGMOID:
+        computeSymbolicBoundsForSigmoid();
+        break;
+
+    case MAX:
+        computeSymbolicBoundsForMax();
+        break;
+
+    case SOFTMAX:
+        computeSymbolicBoundsForSoftmax();
+        break;
+
+    case BILINEAR:
+        computeSymbolicBoundsForBilinear();
+        break;
+
+    default:
+        computeSymbolicBoundsDefault();
+        break;
+    }
+}
+
+void Layer::computeSymbolicBoundsDefault()
+{
+    // This is the default operation, for layers that are not
+    // supported yet. The "symbolic" bounds computed are just the
+    // concrete bounds.
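+    // For example, a neuron with concrete bounds [2, 5] gets the constant symbolic
+    // bounds 2 <= x <= 5 (all input coefficients zero, lower bias 2, upper bias 5),
+    // which is sound for any values of the network's inputs.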
+ + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); + std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); + + for ( unsigned i = 0; i < _size; ++i ) + { + double lb; + double ub; + + if ( _eliminatedNeurons.exists( i ) ) + { + lb = _eliminatedNeurons[i]; + ub = _eliminatedNeurons[i]; + } + else + { + lb = _lb[i]; + ub = _ub[i]; + } + + _symbolicLowerBias[i] = lb; + _symbolicUpperBias[i] = ub; + + _symbolicLbOfLb[i] = lb; + _symbolicUbOfLb[i] = ub; + _symbolicLbOfUb[i] = lb; + _symbolicUbOfUb[i] = ub; + } +} + +void Layer::computeSymbolicBoundsForInput() +{ + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); + std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); + + // For the input layer, the bounds are just the identity polynomials + for ( unsigned i = 0; i < _size; ++i ) + { + _symbolicLb[_size * i + i] = 1; + _symbolicUb[_size * i + i] = 1; + + _symbolicLowerBias[i] = 0; + _symbolicUpperBias[i] = 0; + + double lb = _lb[i]; + double ub = _ub[i]; + + if ( _eliminatedNeurons.exists( i ) ) + { + lb = _eliminatedNeurons[i]; + ub = _eliminatedNeurons[i]; + } + + _symbolicLbOfLb[i] = lb; + _symbolicUbOfLb[i] = ub; + _symbolicLbOfUb[i] = lb; + _symbolicUbOfUb[i] = ub; + } +} + +void Layer::computeSymbolicBoundsForRelu() +{ + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); + std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + { + _symbolicLowerBias[i] = _eliminatedNeurons[i]; + _symbolicUpperBias[i] = _eliminatedNeurons[i]; + + _symbolicLbOfLb[i] = _eliminatedNeurons[i]; + _symbolicUbOfLb[i] = _eliminatedNeurons[i]; + _symbolicLbOfUb[i] = _eliminatedNeurons[i]; + _symbolicUbOfUb[i] = _eliminatedNeurons[i]; + } + } + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + /* + There are two ways we can determine that a ReLU has become fixed: + + 1. If the ReLU's variable has been externally fixed + 2. lbLb >= 0 (ACTIVE) or ubUb <= 0 (INACTIVE) + */ + PhaseStatus reluPhase = PHASE_NOT_FIXED; + + // Has the f variable been eliminated or fixed? 
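+        // (A strictly positive lower bound on the output f forces the active phase;
+        // an upper bound pinned at zero forces the inactive phase.)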
+        if ( FloatUtils::isPositive( _lb[i] ) )
+            reluPhase = RELU_PHASE_ACTIVE;
+        else if ( FloatUtils::isZero( _ub[i] ) )
+            reluPhase = RELU_PHASE_INACTIVE;
+
+        ASSERT( _neuronToActivationSources.exists( i ) );
+        NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin();
+        const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
+
+        /*
+          A ReLU initially "inherits" the symbolic bounds computed
+          for its input variable
+        */
+        unsigned sourceLayerSize = sourceLayer->getSize();
+        const double *sourceSymbolicLb = sourceLayer->getSymbolicLb();
+        const double *sourceSymbolicUb = sourceLayer->getSymbolicUb();
+
+        for ( unsigned j = 0; j < _inputLayerSize; ++j )
+        {
+            _symbolicLb[j * _size + i] =
+                sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron];
+            _symbolicUb[j * _size + i] =
+                sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron];
+        }
+        _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron];
+        _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron];
+
+        double sourceLb = sourceLayer->getLb( sourceIndex._neuron );
+        double sourceUb = sourceLayer->getUb( sourceIndex._neuron );
+
+        _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron );
+        _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron );
+        _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron );
+        _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron );
+
+        // Has the b variable been fixed?
+        if ( !FloatUtils::isNegative( sourceLb ) )
+        {
+            reluPhase = RELU_PHASE_ACTIVE;
+        }
+        else if ( !FloatUtils::isPositive( sourceUb ) )
+        {
+            reluPhase = RELU_PHASE_INACTIVE;
+        }
+
+        if ( reluPhase == PHASE_NOT_FIXED )
+        {
+            // If we got here, we know that lbLb < 0 and ubUb > 0.
+            // There are four possible cases, depending on whether
+            // ubLb and lbUb are negative or positive (see Neurify
+            // paper, page 14).
+
+            // Upper bound
+            if ( _symbolicLbOfUb[i] <= 0 )
+            {
+                // lbOfUb[i] < 0 < ubOfUb[i]
+                // Concretize the upper bound using the Ehlers-like approximation
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                    _symbolicUb[j * _size + i] = _symbolicUb[j * _size + i] * _symbolicUbOfUb[i] /
+                                                 ( _symbolicUbOfUb[i] - _symbolicLbOfUb[i] );
+
+                // Do the same for the bias, and then adjust
+                _symbolicUpperBias[i] = _symbolicUpperBias[i] * _symbolicUbOfUb[i] /
+                                        ( _symbolicUbOfUb[i] - _symbolicLbOfUb[i] );
+                _symbolicUpperBias[i] -= _symbolicLbOfUb[i] * _symbolicUbOfUb[i] /
+                                         ( _symbolicUbOfUb[i] - _symbolicLbOfUb[i] );
+            }
+
+            // Lower bound
+            if ( _symbolicUbOfLb[i] <= 0 )
+            {
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                    _symbolicLb[j * _size + i] = 0;
+
+                _symbolicLowerBias[i] = 0;
+            }
+            else
+            {
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                    _symbolicLb[j * _size + i] = _symbolicLb[j * _size + i] * _symbolicUbOfLb[i] /
+                                                 ( _symbolicUbOfLb[i] - _symbolicLbOfLb[i] );
+
+                _symbolicLowerBias[i] = _symbolicLowerBias[i] * _symbolicUbOfLb[i] /
+                                        ( _symbolicUbOfLb[i] - _symbolicLbOfLb[i] );
+            }
+
+            _symbolicLbOfLb[i] = 0;
+        }
+        else
+        {
+            // The phase of this ReLU is fixed!
+
+            if ( reluPhase == RELU_PHASE_ACTIVE )
+            {
+                // Active ReLU, bounds are propagated as is
+            }
+            else
+            {
+                // Inactive ReLU, returns zero
+                _symbolicLbOfLb[i] = 0;
+                _symbolicUbOfLb[i] = 0;
+                _symbolicLbOfUb[i] = 0;
+                _symbolicUbOfUb[i] = 0;
+
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                {
+                    _symbolicUb[j * _size + i] = 0;
+                    _symbolicLb[j * _size + i] = 0;
+                }
+
+                _symbolicLowerBias[i] = 0;
+                _symbolicUpperBias[i] = 0;
+            }
+        }
+
+        if ( _symbolicLbOfUb[i] < 0 )
+            _symbolicLbOfUb[i] = 0;
+
+        /*
+          We now have the tightest bounds we can for the relu
+          variable. If they are tighter than what was previously
+          known, store them.
+        */
+        if ( _lb[i] < _symbolicLbOfLb[i] )
+        {
+            _lb[i] = _symbolicLbOfLb[i];
+            _layerOwner->receiveTighterBound(
+                Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
+        }
+
+        if ( _ub[i] > _symbolicUbOfUb[i] )
+        {
+            _ub[i] = _symbolicUbOfUb[i];
+            _layerOwner->receiveTighterBound(
+                Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
+        }
+    }
+}
+
+void Layer::computeSymbolicBoundsForSign()
+{
+    std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 );
+    std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 );
+
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        // Eliminated neurons are skipped
+        if ( _eliminatedNeurons.exists( i ) )
+        {
+            _symbolicLowerBias[i] = _eliminatedNeurons[i];
+            _symbolicUpperBias[i] = _eliminatedNeurons[i];
+
+            _symbolicLbOfLb[i] = _eliminatedNeurons[i];
+            _symbolicUbOfLb[i] = _eliminatedNeurons[i];
+            _symbolicLbOfUb[i] = _eliminatedNeurons[i];
+            _symbolicUbOfUb[i] = _eliminatedNeurons[i];
+
+            continue;
+        }
+
+        /*
+          There are two ways we can determine that a Sign has become fixed:
+
+          1. If the Sign's variable has been externally fixed
+          2. lbLb >= 0 (Positive) or ubUb < 0 (Negative)
+        */
+        PhaseStatus signPhase = PHASE_NOT_FIXED;
+
+        // Has the f variable been eliminated or fixed?
+        if ( !FloatUtils::isNegative( _lb[i] ) )
+            signPhase = SIGN_PHASE_POSITIVE;
+        else if ( FloatUtils::isNegative( _ub[i] ) )
+            signPhase = SIGN_PHASE_NEGATIVE;
+
+        ASSERT( _neuronToActivationSources.exists( i ) );
+        NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin();
+        const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
+
+        /*
+          A Sign initially "inherits" the symbolic bounds computed
+          for its input variable
+        */
+        unsigned sourceLayerSize = sourceLayer->getSize();
+        const double *sourceSymbolicLb = sourceLayer->getSymbolicLb();
+        const double *sourceSymbolicUb = sourceLayer->getSymbolicUb();
+
+        for ( unsigned j = 0; j < _inputLayerSize; ++j )
+        {
+            _symbolicLb[j * _size + i] =
+                sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron];
+            _symbolicUb[j * _size + i] =
+                sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron];
+        }
+        _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron];
+        _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron];
+
+        double sourceLb = sourceLayer->getLb( sourceIndex._neuron );
+        double sourceUb = sourceLayer->getUb( sourceIndex._neuron );
+
+        _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron );
+        _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron );
+        _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron );
+        _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron );
+
+        // Has the b variable been fixed?
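+        // (For instance, a source range of [0.3, 2] fixes the Sign to the
+        // constant +1, and [-2, -0.1] fixes it to -1; only a range that
+        // straddles zero, such as [-1, 1], leaves the phase undetermined.)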
+
+        if ( !FloatUtils::isNegative( sourceLb ) )
+        {
+            signPhase = SIGN_PHASE_POSITIVE;
+        }
+        else if ( FloatUtils::isNegative( sourceUb ) )
+        {
+            signPhase = SIGN_PHASE_NEGATIVE;
+        }
+
+        if ( signPhase == PHASE_NOT_FIXED )
+        {
+            PhaseStatus upperSignPhase = PHASE_NOT_FIXED;
+            PhaseStatus lowerSignPhase = PHASE_NOT_FIXED;
+
+            // If we got here, we know that lbLb < 0 and ubUb > 0.
+
+            // Upper bound
+            if ( !FloatUtils::isNegative( _symbolicLbOfUb[i] ) )
+            {
+                // The upper bound is strictly positive - turns into
+                // the constant 1
+
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                    _symbolicUb[j * _size + i] = 0;
+
+                _symbolicUpperBias[i] = 1;
+
+                upperSignPhase = SIGN_PHASE_POSITIVE;
+            }
+            else
+            {
+                // The upper bound's phase is not fixed, use the
+                // parallelogram approximation
+                double factor = -2.0 / _symbolicLbOfLb[i];
+
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                    _symbolicUb[j * _size + i] *= factor;
+
+                // Do the same for the bias, and then adjust
+                _symbolicUpperBias[i] *= factor;
+                _symbolicUpperBias[i] += 1;
+            }
+
+            // Lower bound
+            if ( FloatUtils::isNegative( _symbolicUbOfLb[i] ) )
+            {
+                // The lower bound is strictly negative - turns into
+                // the constant -1
+
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                    _symbolicLb[j * _size + i] = 0;
+
+                _symbolicLowerBias[i] = -1;
+
+                lowerSignPhase = SIGN_PHASE_NEGATIVE;
+            }
+            else
+            {
+                // The lower bound's phase is not fixed, use the
+                // parallelogram approximation
+                double factor = 2.0 / _symbolicUbOfUb[i];
+
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                {
+                    _symbolicLb[j * _size + i] *= factor;
+                }
+
+                // Do the same for the bias, and then adjust
+                _symbolicLowerBias[i] *= factor;
+                _symbolicLowerBias[i] -= 1;
+            }
+
+            if ( upperSignPhase == PHASE_NOT_FIXED )
+            {
+                _symbolicUbOfUb[i] = 1;
+                _symbolicLbOfUb[i] = -1;
+            }
+            else
+            {
+                _symbolicUbOfUb[i] = 1;
+                _symbolicLbOfUb[i] = 1;
+            }
+
+            if ( lowerSignPhase == PHASE_NOT_FIXED )
+            {
+                _symbolicUbOfLb[i] = 1;
+                _symbolicLbOfLb[i] = -1;
+            }
+            else
+            {
+                _symbolicUbOfLb[i] = -1;
+                _symbolicLbOfLb[i] = -1;
+            }
+        }
+        else
+        {
+            // The phase of this Sign is fixed!
+            double constant = ( signPhase == SIGN_PHASE_POSITIVE ) ? 1 : -1;
+
+            _symbolicLbOfLb[i] = constant;
+            _symbolicUbOfLb[i] = constant;
+            _symbolicLbOfUb[i] = constant;
+            _symbolicUbOfUb[i] = constant;
+
+            for ( unsigned j = 0; j < _inputLayerSize; ++j )
+            {
+                _symbolicUb[j * _size + i] = 0;
+                _symbolicLb[j * _size + i] = 0;
+            }
+
+            _symbolicLowerBias[i] = constant;
+            _symbolicUpperBias[i] = constant;
+        }
+
+        if ( _symbolicLbOfLb[i] < -1 )
+            _symbolicLbOfLb[i] = -1;
+        if ( _symbolicUbOfUb[i] > 1 )
+            _symbolicUbOfUb[i] = 1;
+
+        /*
+          We now have the tightest bounds we can for the sign
+          variable. If they are tighter than what was previously
+          known, store them.
+ */ + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } +} + +void Layer::computeSymbolicBoundsForAbsoluteValue() +{ + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); + std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + { + _symbolicLowerBias[i] = _eliminatedNeurons[i]; + _symbolicUpperBias[i] = _eliminatedNeurons[i]; + + _symbolicLbOfLb[i] = _eliminatedNeurons[i]; + _symbolicUbOfLb[i] = _eliminatedNeurons[i]; + _symbolicLbOfUb[i] = _eliminatedNeurons[i]; + _symbolicUbOfUb[i] = _eliminatedNeurons[i]; + + continue; + } + + PhaseStatus absPhase = PHASE_NOT_FIXED; + + ASSERT( _neuronToActivationSources.exists( i ) ); + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + + unsigned sourceLayerSize = sourceLayer->getSize(); + const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); + const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] = + sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron]; + _symbolicUb[j * _size + i] = + sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron]; + } + + _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron]; + _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron]; + + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); + + _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron ); + _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); + _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); + _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); + + if ( sourceLb >= 0 ) + absPhase = ABS_PHASE_POSITIVE; + else if ( sourceUb <= 0 ) + absPhase = ABS_PHASE_NEGATIVE; + + if ( absPhase == PHASE_NOT_FIXED ) + { + // If we got here, we know that lbOfLb < 0 < ubOfUb. In this case, + // we do naive concretization: lb is 0, ub is the max between + // -lb and ub of the input neuron + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] = 0; + _symbolicUb[j * _size + i] = 0; + } + + _symbolicLowerBias[i] = 0; + _symbolicUpperBias[i] = FloatUtils::max( -sourceLb, sourceUb ); + + _symbolicLbOfLb[i] = 0; + _symbolicUbOfLb[i] = _symbolicUpperBias[i]; + _symbolicLbOfUb[i] = 0; + _symbolicUbOfUb[i] = _symbolicUpperBias[i]; + } + else + { + // The phase of this AbsoluteValueConstraint is fixed! 
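+            // (Illustrative example: a source range of [-5, -2] is entirely
+            // negative, so |x| maps it to [2, 5]; the old upper bound,
+            // negated, becomes the new lower bound and vice versa, which is
+            // exactly the swap performed below.)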
+
+            if ( absPhase == ABS_PHASE_POSITIVE )
+            {
+                // Positive AbsoluteValue, bounds are propagated as is
+            }
+            else
+            {
+                // Negative AbsoluteValue, bounds are negated and flipped
+                double temp;
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                {
+                    temp = _symbolicUb[j * _size + i];
+                    _symbolicUb[j * _size + i] = -_symbolicLb[j * _size + i];
+                    _symbolicLb[j * _size + i] = -temp;
+                }
+
+                temp = _symbolicLowerBias[i];
+                _symbolicLowerBias[i] = -_symbolicUpperBias[i];
+                _symbolicUpperBias[i] = -temp;
+
+                // Old lb, negated, is the new ub
+                temp = _symbolicLbOfLb[i];
+                _symbolicLbOfLb[i] = -_symbolicUbOfUb[i];
+                _symbolicUbOfUb[i] = -temp;
+
+                temp = _symbolicUbOfLb[i];
+                _symbolicUbOfLb[i] = -_symbolicLbOfUb[i];
+                _symbolicLbOfUb[i] = -temp;
+            }
+        }
+
+        // In extreme cases (constraint set externally), _symbolicLbOfLb
+        // could be negative - so adjust this
+        if ( _symbolicLbOfLb[i] < 0 )
+            _symbolicLbOfLb[i] = 0;
+
+        /*
+          We now have the tightest bounds we can for the abs
+          variable. If they are tighter than what was previously
+          known, store them.
+        */
+        if ( _lb[i] < _symbolicLbOfLb[i] )
+        {
+            _lb[i] = _symbolicLbOfLb[i];
+            _layerOwner->receiveTighterBound(
+                Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
+        }
+
+        if ( _ub[i] > _symbolicUbOfUb[i] )
+        {
+            _ub[i] = _symbolicUbOfUb[i];
+            _layerOwner->receiveTighterBound(
+                Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
+        }
+    }
+}
+
+void Layer::computeSymbolicBoundsForLeakyRelu()
+{
+    ASSERT( _alpha > 0 && _alpha < 1 );
+
+    std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 );
+    std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 );
+
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+        {
+            _symbolicLowerBias[i] = _eliminatedNeurons[i];
+            _symbolicUpperBias[i] = _eliminatedNeurons[i];
+
+            _symbolicLbOfLb[i] = _eliminatedNeurons[i];
+            _symbolicUbOfLb[i] = _eliminatedNeurons[i];
+            _symbolicLbOfUb[i] = _eliminatedNeurons[i];
+            _symbolicUbOfUb[i] = _eliminatedNeurons[i];
+        }
+    }
+
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+            continue;
+
+        /*
+          There are two ways we can determine that a LeakyReLU has become fixed:
+
+          1. If the LeakyReLU's variable has been externally fixed
+          2. lbLb >= 0 (ACTIVE) or ubUb <= 0 (INACTIVE)
+        */
+        PhaseStatus leakyReluPhase = PHASE_NOT_FIXED;
+
+        // Has the f variable been eliminated or fixed?
+
+        if ( FloatUtils::isPositive( _lb[i] ) )
+            leakyReluPhase = RELU_PHASE_ACTIVE;
+        else if ( FloatUtils::isZero( _ub[i] ) )
+            leakyReluPhase = RELU_PHASE_INACTIVE;
+
+        ASSERT( _neuronToActivationSources.exists( i ) );
+        NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin();
+        const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
+
+        /*
+          A LeakyReLU initially "inherits" the symbolic bounds computed
+          for its input variable
+        */
+        unsigned sourceLayerSize = sourceLayer->getSize();
+        const double *sourceSymbolicLb = sourceLayer->getSymbolicLb();
+        const double *sourceSymbolicUb = sourceLayer->getSymbolicUb();
+
+        for ( unsigned j = 0; j < _inputLayerSize; ++j )
+        {
+            _symbolicLb[j * _size + i] =
+                sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron];
+            _symbolicUb[j * _size + i] =
+                sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron];
+        }
+        _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron];
+        _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron];
+
+        double sourceLb = sourceLayer->getLb( sourceIndex._neuron );
+        double sourceUb = sourceLayer->getUb( sourceIndex._neuron );
+
+        _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron );
+        _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron );
+        _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron );
+        _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron );
+
+        // Has the b variable been fixed?
+        if ( !FloatUtils::isNegative( sourceLb ) )
+        {
+            leakyReluPhase = RELU_PHASE_ACTIVE;
+        }
+        else if ( !FloatUtils::isPositive( sourceUb ) )
+        {
+            leakyReluPhase = RELU_PHASE_INACTIVE;
+        }
+
+        if ( leakyReluPhase == PHASE_NOT_FIXED )
+        {
+            // LeakyReLU not fixed
+            // Symbolic upper bound: the line through ( l, alpha * l ) and ( u, u ), i.e.
+            // x_f <= ( ( u - alpha * l ) * x_b + ( alpha - 1 ) * u * l ) / ( u - l )
+            // Concrete upper bound: x_f <= ub_b
+            double width = sourceUb - sourceLb;
+            double weight = ( sourceUb - _alpha * sourceLb ) / width;
+
+            if ( _alpha <= 1 )
+            {
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                {
+                    _symbolicUb[j * _size + i] *= weight;
+                }
+
+                // Do the same for the bias, and then adjust
+                _symbolicUpperBias[i] *= weight;
+                _symbolicUpperBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width;
+
+                // For the lower bound, in general, x_f >= lambda * x_b, where
+                // alpha <= lambda <= 1, would be a sound lower bound. We
+                // use the heuristic described in section 4.1 of
+                // https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf
+                // to set the value of lambda (either alpha or 1 is considered).
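+                // (Worked example with illustrative numbers: alpha = 0.1 and
+                // a source range of [-1, 3]. Since u = 3 > -l = 1, the
+                // heuristic picks lambda = 1 and the lower bound passes
+                // through unchanged; for a range of [-3, 1] it would pick
+                // lambda = alpha and scale the lower bound by 0.1.)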
+
+                if ( sourceUb > -sourceLb )
+                {
+                    // lambda = 1
+                    // Symbolic lower bound: x_f >= x_b
+                    // Concrete lower bound: x_f >= sourceLb
+
+                    // Lower bounds are passed as is
+                }
+                else
+                {
+                    // lambda = alpha
+                    // Symbolic lower bound: x_f >= alpha * x_b
+                    // Concrete lower bound: x_f >= alpha * sourceLb
+
+                    for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                    {
+                        _symbolicLb[j * _size + i] *= _alpha;
+                    }
+
+                    _symbolicLowerBias[i] *= _alpha;
+                }
+            }
+            else
+            {
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                {
+                    _symbolicLb[j * _size + i] *= weight;
+                }
+
+                // Do the same for the bias, and then adjust
+                _symbolicLowerBias[i] *= weight;
+                _symbolicLowerBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width;
+
+                if ( sourceUb > -sourceLb )
+                {
+                    // Upper bounds are passed as is
+                }
+                else
+                {
+                    for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                    {
+                        _symbolicUb[j * _size + i] *= _alpha;
+                    }
+
+                    _symbolicUpperBias[i] *= _alpha;
+                }
+            }
+
+            /*
+              We now have the symbolic representation for the current
+              layer. Next, we compute new lower and upper bounds for
+              it. For each of these bounds, we compute an upper bound and
+              a lower bound.
+            */
+            _symbolicLbOfLb[i] = _symbolicLowerBias[i];
+            _symbolicUbOfLb[i] = _symbolicLowerBias[i];
+            _symbolicLbOfUb[i] = _symbolicUpperBias[i];
+            _symbolicUbOfUb[i] = _symbolicUpperBias[i];
+
+            for ( unsigned j = 0; j < _inputLayerSize; ++j )
+            {
+                double inputLb = _layerOwner->getLayer( 0 )->getLb( j );
+                double inputUb = _layerOwner->getLayer( 0 )->getUb( j );
+
+                double entry = _symbolicLb[j * _size + i];
+
+                if ( entry >= 0 )
+                {
+                    _symbolicLbOfLb[i] += ( entry * inputLb );
+                    _symbolicUbOfLb[i] += ( entry * inputUb );
+                }
+                else
+                {
+                    _symbolicLbOfLb[i] += ( entry * inputUb );
+                    _symbolicUbOfLb[i] += ( entry * inputLb );
+                }
+
+                entry = _symbolicUb[j * _size + i];
+
+                if ( entry >= 0 )
+                {
+                    _symbolicLbOfUb[i] += ( entry * inputLb );
+                    _symbolicUbOfUb[i] += ( entry * inputUb );
+                }
+                else
+                {
+                    _symbolicLbOfUb[i] += ( entry * inputUb );
+                    _symbolicUbOfUb[i] += ( entry * inputLb );
+                }
+            }
+        }
+        else
+        {
+            // The phase of this LeakyReLU is fixed!
+            if ( leakyReluPhase == RELU_PHASE_ACTIVE )
+            {
+                // Positive LeakyReLU, bounds are propagated as is
+            }
+            else
+            {
+                // Negative LeakyReLU, bounds are multiplied by _alpha
+                _symbolicLbOfLb[i] *= _alpha;
+                _symbolicUbOfLb[i] *= _alpha;
+                _symbolicLbOfUb[i] *= _alpha;
+                _symbolicUbOfUb[i] *= _alpha;
+
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                {
+                    _symbolicUb[j * _size + i] *= _alpha;
+                    _symbolicLb[j * _size + i] *= _alpha;
+                }
+
+                _symbolicLowerBias[i] *= _alpha;
+                _symbolicUpperBias[i] *= _alpha;
+            }
+        }
+
+        if ( _symbolicUbOfUb[i] > sourceUb )
+            _symbolicUbOfUb[i] = sourceUb;
+        if ( _symbolicLbOfLb[i] < _alpha * sourceLb )
+            _symbolicLbOfLb[i] = _alpha * sourceLb;
+
+        /*
+          We now have the tightest bounds we can for the leakyRelu
+          variable. If they are tighter than what was previously
+          known, store them.
+ */ + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } +} + +void Layer::computeSymbolicBoundsForSigmoid() +{ + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); + std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + { + _symbolicLowerBias[i] = _eliminatedNeurons[i]; + _symbolicUpperBias[i] = _eliminatedNeurons[i]; + + _symbolicLbOfLb[i] = _eliminatedNeurons[i]; + _symbolicUbOfLb[i] = _eliminatedNeurons[i]; + _symbolicLbOfUb[i] = _eliminatedNeurons[i]; + _symbolicUbOfUb[i] = _eliminatedNeurons[i]; + } + } + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + ASSERT( _neuronToActivationSources.exists( i ) ); + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + + /* + A Sigmoid initially "inherits" the symbolic bounds computed + for its input variable + */ + unsigned sourceLayerSize = sourceLayer->getSize(); + const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); + const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] = + sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron]; + _symbolicUb[j * _size + i] = + sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron]; + } + _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron]; + _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron]; + + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); + + _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron ); + _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); + _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); + _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); + + // Bounds of lb, ub are the Sigmoids of source lb, ub + double sourceUbSigmoid = SigmoidConstraint::sigmoid( sourceUb ); + double sourceLbSigmoid = SigmoidConstraint::sigmoid( sourceLb ); + + // Case when the Sigmoid constraint is fixed + if ( FloatUtils::areEqual( sourceLb, sourceUb ) ) + { + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] = 0; + _symbolicUb[j * _size + i] = 0; + } + + _symbolicLbOfUb[i] = sourceUbSigmoid; + _symbolicUbOfUb[i] = sourceUbSigmoid; + _symbolicLbOfLb[i] = sourceLbSigmoid; + _symbolicUbOfLb[i] = sourceLbSigmoid; + + _symbolicUpperBias[i] = sourceUbSigmoid; + _symbolicLowerBias[i] = sourceLbSigmoid; + } + + // Sigmoid not fixed + else + { + double lambda = ( _ub[i] - _lb[i] ) / ( sourceUb - sourceLb ); + double lambdaPrime = std::min( SigmoidConstraint::sigmoidDerivative( sourceLb ), + SigmoidConstraint::sigmoidDerivative( sourceUb ) ); + + // update lower bound + if ( FloatUtils::isPositive( sourceLb ) ) + { + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] *= lambda; + } + + // Do the same for the bias, and then adjust + _symbolicLowerBias[i] *= lambda; + _symbolicLowerBias[i] += 
sourceLbSigmoid - lambda * sourceLb;
+            }
+            else
+            {
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                {
+                    _symbolicLb[j * _size + i] *= lambdaPrime;
+                }
+
+                // Do the same for the bias, and then adjust
+                _symbolicLowerBias[i] *= lambdaPrime;
+                _symbolicLowerBias[i] += sourceLbSigmoid - lambdaPrime * sourceLb;
+            }
+
+            // update upper bound
+            if ( !FloatUtils::isPositive( sourceUb ) )
+            {
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                {
+                    _symbolicUb[j * _size + i] *= lambda;
+                }
+
+                // Do the same for the bias, and then adjust
+                _symbolicUpperBias[i] *= lambda;
+                _symbolicUpperBias[i] += sourceUbSigmoid - lambda * sourceUb;
+            }
+            else
+            {
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                {
+                    _symbolicUb[j * _size + i] *= lambdaPrime;
+                }
+
+                // Do the same for the bias, and then adjust
+                _symbolicUpperBias[i] *= lambdaPrime;
+                _symbolicUpperBias[i] += sourceUbSigmoid - lambdaPrime * sourceUb;
+            }
+
+            /*
+              We now have the symbolic representation for the current
+              layer. Next, we compute new lower and upper bounds for
+              it. For each of these bounds, we compute an upper bound and
+              a lower bound.
+            */
+            _symbolicLbOfLb[i] = _symbolicLowerBias[i];
+            _symbolicUbOfLb[i] = _symbolicLowerBias[i];
+            _symbolicLbOfUb[i] = _symbolicUpperBias[i];
+            _symbolicUbOfUb[i] = _symbolicUpperBias[i];
+
+            for ( unsigned j = 0; j < _inputLayerSize; ++j )
+            {
+                double inputLb = _layerOwner->getLayer( 0 )->getLb( j );
+                double inputUb = _layerOwner->getLayer( 0 )->getUb( j );
+
+                double entry = _symbolicLb[j * _size + i];
+
+                if ( entry >= 0 )
+                {
+                    _symbolicLbOfLb[i] += ( entry * inputLb );
+                    _symbolicUbOfLb[i] += ( entry * inputUb );
+                }
+                else
+                {
+                    _symbolicLbOfLb[i] += ( entry * inputUb );
+                    _symbolicUbOfLb[i] += ( entry * inputLb );
+                }
+
+                entry = _symbolicUb[j * _size + i];
+
+                if ( entry >= 0 )
+                {
+                    _symbolicLbOfUb[i] += ( entry * inputLb );
+                    _symbolicUbOfUb[i] += ( entry * inputUb );
+                }
+                else
+                {
+                    _symbolicLbOfUb[i] += ( entry * inputUb );
+                    _symbolicUbOfUb[i] += ( entry * inputLb );
+                }
+            }
+        }
+
+        if ( _symbolicLbOfLb[i] < -1 )
+            _symbolicLbOfLb[i] = -1;
+        if ( _symbolicUbOfUb[i] > 1 )
+            _symbolicUbOfUb[i] = 1;
+
+        /*
+          We now have the tightest bounds we can for the sigmoid
+          variable. If they are tighter than what was previously
+          known, store them.
+
+        */
+        if ( _lb[i] < _symbolicLbOfLb[i] )
+        {
+            _lb[i] = _symbolicLbOfLb[i];
+            _layerOwner->receiveTighterBound(
+                Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
+        }
+
+        if ( _ub[i] > _symbolicUbOfUb[i] )
+        {
+            _ub[i] = _symbolicUbOfUb[i];
+            _layerOwner->receiveTighterBound(
+                Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
+        }
+    }
+}
+
+void Layer::computeSymbolicBoundsForRound()
+{
+    std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 );
+    std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 );
+
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+        {
+            _symbolicLowerBias[i] = _eliminatedNeurons[i];
+            _symbolicUpperBias[i] = _eliminatedNeurons[i];
+
+            _symbolicLbOfLb[i] = _eliminatedNeurons[i];
+            _symbolicUbOfLb[i] = _eliminatedNeurons[i];
+            _symbolicLbOfUb[i] = _eliminatedNeurons[i];
+            _symbolicUbOfUb[i] = _eliminatedNeurons[i];
+        }
+    }
+
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+            continue;
+
+        ASSERT( _neuronToActivationSources.exists( i ) );
+        NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin();
+        const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
+
+        /*
+          A Round initially "inherits" the symbolic bounds computed
+          for its input variable
+        */
+        unsigned sourceLayerSize = sourceLayer->getSize();
+        const double *sourceSymbolicLb = sourceLayer->getSymbolicLb();
+        const double *sourceSymbolicUb = sourceLayer->getSymbolicUb();
+
+        for ( unsigned j = 0; j < _inputLayerSize; ++j )
+        {
+            _symbolicLb[j * _size + i] =
+                sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron];
+            _symbolicUb[j * _size + i] =
+                sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron];
+        }
+        _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron];
+        _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron];
+
+        double sourceLb = sourceLayer->getLb( sourceIndex._neuron );
+        double sourceUb = sourceLayer->getUb( sourceIndex._neuron );
+
+        _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron );
+        _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron );
+        _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron );
+        _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron );
+
+        // Bounds of lb, ub are the rounded values of source lb, ub
+        double sourceUbRound = FloatUtils::round( sourceUb );
+        double sourceLbRound = FloatUtils::round( sourceLb );
+
+        _symbolicLbOfUb[i] = sourceUbRound;
+        _symbolicUbOfUb[i] = sourceUbRound;
+        _symbolicLbOfLb[i] = sourceLbRound;
+        _symbolicUbOfLb[i] = sourceLbRound;
+
+        // Case when the Round constraint is fixed
+        if ( FloatUtils::areEqual( sourceUbRound, sourceLbRound ) )
+        {
+            for ( unsigned j = 0; j < _inputLayerSize; ++j )
+            {
+                _symbolicLb[j * _size + i] = 0;
+                _symbolicUb[j * _size + i] = 0;
+            }
+
+            _symbolicUpperBias[i] = sourceUbRound;
+            _symbolicLowerBias[i] = sourceLbRound;
+        }
+
+        // Round not fixed
+        else
+        {
+            // Symbolic upper bound: x_f <= x_b + 0.5
+            // Concrete upper bound: x_f <= round(ub_b)
+
+            _symbolicUpperBias[i] += 0.5;
+
+            // Symbolic lower bound: x_f >= x_b - 0.5
+            // Concrete lower bound: x_f >= round(lb_b)
+
+            _symbolicLowerBias[i] -= 0.5;
+        }
+
+        /*
+          We now have the tightest bounds we can for the round
+          variable. If they are tighter than what was previously
+          known, store them.
+
+        */
+        if ( _lb[i] < _symbolicLbOfLb[i] )
+        {
+            _lb[i] = _symbolicLbOfLb[i];
+            _layerOwner->receiveTighterBound(
+                Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
+        }
+
+        if ( _ub[i] > _symbolicUbOfUb[i] )
+        {
+            _ub[i] = _symbolicUbOfUb[i];
+            _layerOwner->receiveTighterBound(
+                Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
+        }
+    }
+}
+
+void Layer::computeSymbolicBoundsForMax()
+{
+    std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 );
+    std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 );
+
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+        {
+            _symbolicLowerBias[i] = _eliminatedNeurons[i];
+            _symbolicUpperBias[i] = _eliminatedNeurons[i];
+
+            _symbolicLbOfLb[i] = _eliminatedNeurons[i];
+            _symbolicUbOfLb[i] = _eliminatedNeurons[i];
+            _symbolicLbOfUb[i] = _eliminatedNeurons[i];
+            _symbolicUbOfUb[i] = _eliminatedNeurons[i];
+        }
+    }
+
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+            continue;
+
+        ASSERT( _neuronToActivationSources.exists( i ) );
+        List<NeuronIndex> sources = getActivationSources( i );
+        const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer );
+
+        unsigned sourceLayerSize = sourceLayer->getSize();
+        const double *sourceSymbolicLb = sourceLayer->getSymbolicLb();
+        const double *sourceSymbolicUb = sourceLayer->getSymbolicUb();
+
+        NeuronIndex indexOfMaxLowerBound = *( sources.begin() );
+        double maxLowerBound = FloatUtils::negativeInfinity();
+        double maxUpperBound = FloatUtils::negativeInfinity();
+
+        Map<NeuronIndex, double> sourceLbs;
+        Map<NeuronIndex, double> sourceUbs;
+        for ( const auto &sourceIndex : sources )
+        {
+            unsigned sourceNeuron = sourceIndex._neuron;
+            double sourceLb = sourceLayer->getLb( sourceNeuron );
+            double sourceUb = sourceLayer->getUb( sourceNeuron );
+
+            sourceLbs[sourceIndex] = sourceLb;
+            sourceUbs[sourceIndex] = sourceUb;
+
+            if ( maxLowerBound < sourceLb )
+            {
+                indexOfMaxLowerBound = sourceIndex;
+                maxLowerBound = sourceLb;
+            }
+            if ( maxUpperBound < sourceUb )
+            {
+                maxUpperBound = sourceUb;
+            }
+        }
+
+        // The phase is fixed if the lower-bound of a source variable x_b is
+        // larger than the upper-bounds of the other source variables.
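+        // (Illustrative example: for sources with bounds [1, 2], [-1, 0.5]
+        // and [0, 0.9], the first source's lower bound 1 exceeds the other
+        // sources' upper bounds 0.5 and 0.9, so the max is fixed to the
+        // first source.)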
+
+        bool phaseFixed = true;
+        for ( const auto &sourceIndex : sources )
+        {
+            if ( sourceIndex != indexOfMaxLowerBound &&
+                 FloatUtils::gt( sourceUbs[sourceIndex], maxLowerBound ) )
+            {
+                phaseFixed = false;
+                break;
+            }
+        }
+
+        if ( phaseFixed )
+        {
+            // Phase fixed
+            // Symbolic bound: x_b <= x_f <= x_b
+            // Concrete bound: lb_b <= x_f <= ub_b
+            for ( unsigned j = 0; j < _inputLayerSize; ++j )
+            {
+                _symbolicLb[j * _size + i] =
+                    sourceSymbolicLb[j * sourceLayerSize + indexOfMaxLowerBound._neuron];
+                _symbolicUb[j * _size + i] =
+                    sourceSymbolicUb[j * sourceLayerSize + indexOfMaxLowerBound._neuron];
+            }
+            _symbolicLowerBias[i] =
+                sourceLayer->getSymbolicLowerBias()[indexOfMaxLowerBound._neuron];
+            _symbolicUpperBias[i] =
+                sourceLayer->getSymbolicUpperBias()[indexOfMaxLowerBound._neuron];
+
+            _symbolicLbOfLb[i] = maxLowerBound;
+            _symbolicUbOfLb[i] = maxLowerBound;
+            _symbolicLbOfUb[i] = sourceUbs[indexOfMaxLowerBound];
+            _symbolicUbOfUb[i] = sourceUbs[indexOfMaxLowerBound];
+        }
+        else
+        {
+            // MaxPool not fixed
+            // Symbolic bounds: x_b <= x_f <= maxUpperBound
+            // Concrete bounds: lb_b <= x_f <= maxUpperBound
+            for ( unsigned j = 0; j < _inputLayerSize; ++j )
+            {
+                _symbolicLb[j * _size + i] =
+                    sourceSymbolicLb[j * sourceLayerSize + indexOfMaxLowerBound._neuron];
+                _symbolicUb[j * _size + i] = 0;
+            }
+            _symbolicLowerBias[i] =
+                sourceLayer->getSymbolicLowerBias()[indexOfMaxLowerBound._neuron];
+            _symbolicUpperBias[i] = maxUpperBound;
+
+            _symbolicLbOfLb[i] = maxLowerBound;
+            _symbolicUbOfLb[i] = maxLowerBound;
+            _symbolicLbOfUb[i] = maxUpperBound;
+            _symbolicUbOfUb[i] = maxUpperBound;
+        }
+
+        /*
+          We now have the tightest bounds we can for the max
+          variable. If they are tighter than what was previously
+          known, store them.
+        */
+        if ( _lb[i] < _symbolicLbOfLb[i] )
+        {
+            _lb[i] = _symbolicLbOfLb[i];
+            _layerOwner->receiveTighterBound(
+                Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
+        }
+
+        if ( _ub[i] > _symbolicUbOfUb[i] )
+        {
+            _ub[i] = _symbolicUbOfUb[i];
+            _layerOwner->receiveTighterBound(
+                Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
+        }
+    }
+}
+
+void Layer::computeSymbolicBoundsForSoftmax()
+{
+    std::fill_n( _symbolicLowerBias, _size, 0 );
+    std::fill_n( _symbolicUpperBias, _size, 0 );
+
+    double *symbolicLb = new double[_size * _size];
+    double *symbolicUb = new double[_size * _size];
+    std::fill_n( symbolicLb, _size * _size, 0 );
+    std::fill_n( symbolicUb, _size * _size, 0 );
+
+    double *_work = new double[_size * _size];
+    std::fill_n( _work, _size * _size, 0 );
+
+    Set<unsigned> handledInputNeurons;
+    unsigned sourceLayerSize = _size;
+    SoftmaxBoundType boundType = Options::get()->getSoftmaxBoundType();
+
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+        {
+            _symbolicLowerBias[i] = _eliminatedNeurons[i];
+            _symbolicUpperBias[i] = _eliminatedNeurons[i];
+
+            _symbolicLbOfLb[i] = _eliminatedNeurons[i];
+            _symbolicUbOfLb[i] = _eliminatedNeurons[i];
+            _symbolicLbOfUb[i] = _eliminatedNeurons[i];
+            _symbolicUbOfUb[i] = _eliminatedNeurons[i];
+        }
+    }
+
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+            continue;
+
+        ASSERT( _neuronToActivationSources.exists( i ) );
+        List<NeuronIndex> sources = getActivationSources( i );
+        const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer );
+
+        sourceLayerSize = sourceLayer->getSize();
+        ASSERT( sourceLayerSize == _size );
+
+        Vector<double> sourceLbs;
+        Vector<double> sourceUbs;
+        Vector<double> sourceMids;
+        Vector<double> targetLbs;
+        Vector<double> targetUbs;
+        unsigned len
= 0; + for ( const auto &sourceIndex : sources ) + { + unsigned sourceNeuron = sourceIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + + sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + sourceMids.append( ( sourceLb + sourceUb ) / 2 ); + targetLbs.append( _lb[i] ); + targetUbs.append( _ub[i] ); + + ++len; + } + + // Find the index of i in the softmax + unsigned index = 0; + for ( const auto &sourceIndex : sources ) + { + if ( handledInputNeurons.exists( sourceIndex._neuron ) ) + ++index; + else + { + handledInputNeurons.insert( sourceIndex._neuron ); + break; + } + } + + double lb = linearLowerBound( sourceLbs, sourceUbs, index ); + double ub = linearUpperBound( sourceLbs, sourceUbs, index ); + if ( _lb[i] < lb ) + { + _lb[i] = lb; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + if ( _ub[i] > ub ) + { + _ub[i] = ub; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + targetLbs[index] = _lb[i]; + targetUbs[index] = _ub[i]; + + if ( FloatUtils::areEqual( _lb[i], _ub[i] ) ) + { + _symbolicLowerBias[i] = _lb[i]; + _symbolicUpperBias[i] = _ub[i]; + for ( const auto &sourceIndex : sources ) + { + symbolicLb[len * sourceIndex._neuron + i] = 0; + symbolicUb[len * sourceIndex._neuron + i] = 0; + } + } + else + { + // Compute symbolic bound + if ( boundType == SoftmaxBoundType::LOG_SUM_EXP_DECOMPOSITION ) + { + bool useLSE2 = false; + for ( const auto &lb : targetLbs ) + { + if ( lb > GlobalConfiguration::SOFTMAX_LSE2_THRESHOLD ) + useLSE2 = true; + } + unsigned inputIndex = 0; + if ( !useLSE2 ) + { + _symbolicLowerBias[i] = + LSELowerBound( sourceMids, sourceLbs, sourceUbs, index ); + for ( const auto &sourceIndex : sources ) + { + double dldj = + dLSELowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + symbolicLb[len * sourceIndex._neuron + i] = dldj; + _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; + ++inputIndex; + } + } + else + { + _symbolicLowerBias[i] = + LSELowerBound2( sourceMids, sourceLbs, sourceUbs, index ); + for ( const auto &sourceIndex : sources ) + { + double dldj = + dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + symbolicLb[len * sourceIndex._neuron + i] = dldj; + _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; + ++inputIndex; + } + } + + _symbolicUpperBias[i] = LSEUpperBound( sourceMids, targetLbs, targetUbs, index ); + inputIndex = 0; + for ( const auto &sourceIndex : sources ) + { + double dudj = + dLSEUpperbound( sourceMids, targetLbs, targetUbs, index, inputIndex ); + symbolicUb[len * sourceIndex._neuron + i] = dudj; + _symbolicUpperBias[i] -= dudj * sourceMids[inputIndex]; + ++inputIndex; + } + } + else if ( boundType == SoftmaxBoundType::EXPONENTIAL_RECIPROCAL_DECOMPOSITION ) + { + _symbolicLowerBias[i] = ERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); + unsigned inputIndex = 0; + for ( const auto &sourceIndex : sources ) + { + double dldj = + dERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + symbolicLb[len * sourceIndex._neuron + i] = dldj; + _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; + ++inputIndex; + } + + _symbolicUpperBias[i] = ERUpperBound( sourceMids, targetLbs, targetUbs, index ); + inputIndex = 0; + for ( const auto &sourceIndex : sources ) + { + double 
dudj = + dERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex ); + symbolicUb[len * sourceIndex._neuron + i] = dudj; + _symbolicUpperBias[i] -= dudj * sourceMids[inputIndex]; + ++inputIndex; + } + } + } + } + + for ( const auto &sourceLayerEntry : _sourceLayers ) + { + const Layer *sourceLayer = _layerOwner->getLayer( sourceLayerEntry.first ); + + /* + Perform the multiplication + + newUB = oldUB * posWeights + oldLB * negWeights + newLB = oldUB * negWeights + oldLB * posWeights + */ + + for ( unsigned i = 0; i < sourceLayerSize * _size; ++i ) + { + if ( symbolicLb[i] > 0 ) + _work[i] = symbolicLb[i]; + else + _work[i] = 0; + } + // _work is now positive weights in symbolicLb + matrixMultiplication( sourceLayer->getSymbolicLb(), + _work, + _symbolicLb, + _inputLayerSize, + sourceLayerSize, + _size ); + if ( sourceLayer->getSymbolicLowerBias() ) + matrixMultiplication( sourceLayer->getSymbolicLowerBias(), + _work, + _symbolicLowerBias, + 1, + sourceLayerSize, + _size ); + + for ( unsigned i = 0; i < sourceLayerSize * _size; ++i ) + { + if ( symbolicLb[i] < 0 ) + _work[i] = symbolicLb[i]; + else + _work[i] = 0; + } + // _work is now negative weights in symbolicLb + matrixMultiplication( sourceLayer->getSymbolicUb(), + _work, + _symbolicLb, + _inputLayerSize, + sourceLayerSize, + _size ); + if ( sourceLayer->getSymbolicLowerBias() ) + matrixMultiplication( sourceLayer->getSymbolicUpperBias(), + _work, + _symbolicLowerBias, + 1, + sourceLayerSize, + _size ); + + for ( unsigned i = 0; i < sourceLayerSize * _size; ++i ) + { + if ( symbolicUb[i] > 0 ) + _work[i] = symbolicUb[i]; + else + _work[i] = 0; + } + // _work is now positive weights in symbolicUb + matrixMultiplication( sourceLayer->getSymbolicUb(), + _work, + _symbolicUb, + _inputLayerSize, + sourceLayerSize, + _size ); + if ( sourceLayer->getSymbolicUpperBias() ) + matrixMultiplication( sourceLayer->getSymbolicUpperBias(), + _work, + _symbolicUpperBias, + 1, + sourceLayerSize, + _size ); + + for ( unsigned i = 0; i < sourceLayerSize * _size; ++i ) + { + if ( symbolicUb[i] < 0 ) + _work[i] = symbolicUb[i]; + else + _work[i] = 0; + } + // _work is now negative weights in symbolicUb + matrixMultiplication( sourceLayer->getSymbolicLb(), + _work, + _symbolicUb, + _inputLayerSize, + sourceLayerSize, + _size ); + if ( sourceLayer->getSymbolicUpperBias() ) + matrixMultiplication( sourceLayer->getSymbolicLowerBias(), + _work, + _symbolicUpperBias, + 1, + sourceLayerSize, + _size ); + } + + /* + We now have the symbolic representation for the current + layer. Next, we compute new lower and upper bounds for + it. For each of these bounds, we compute an upper bound and + a lower bound. 
+
+    */
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+            continue;
+
+        _symbolicLbOfLb[i] = _symbolicLowerBias[i];
+        _symbolicUbOfLb[i] = _symbolicLowerBias[i];
+        _symbolicLbOfUb[i] = _symbolicUpperBias[i];
+        _symbolicUbOfUb[i] = _symbolicUpperBias[i];
+
+        for ( unsigned j = 0; j < _inputLayerSize; ++j )
+        {
+            double inputLb = _layerOwner->getLayer( 0 )->getLb( j );
+            double inputUb = _layerOwner->getLayer( 0 )->getUb( j );
+
+            double entry = _symbolicLb[j * _size + i];
+
+            if ( entry >= 0 )
+            {
+                _symbolicLbOfLb[i] += ( entry * inputLb );
+                _symbolicUbOfLb[i] += ( entry * inputUb );
+            }
+            else
+            {
+                _symbolicLbOfLb[i] += ( entry * inputUb );
+                _symbolicUbOfLb[i] += ( entry * inputLb );
+            }
+
+            entry = _symbolicUb[j * _size + i];
+
+            if ( entry >= 0 )
+            {
+                _symbolicLbOfUb[i] += ( entry * inputLb );
+                _symbolicUbOfUb[i] += ( entry * inputUb );
+            }
+            else
+            {
+                _symbolicLbOfUb[i] += ( entry * inputUb );
+                _symbolicUbOfUb[i] += ( entry * inputLb );
+            }
+        }
+    }
+
+    if ( symbolicLb )
+    {
+        delete[] symbolicLb;
+        symbolicLb = NULL;
+    }
+    if ( symbolicUb )
+    {
+        delete[] symbolicUb;
+        symbolicUb = NULL;
+    }
+    if ( _work )
+    {
+        delete[] _work;
+        _work = NULL;
+    }
+}
+
+void Layer::computeSymbolicBoundsForBilinear()
+{
+    std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 );
+    std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 );
+
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+        {
+            _symbolicLowerBias[i] = _eliminatedNeurons[i];
+            _symbolicUpperBias[i] = _eliminatedNeurons[i];
+
+            _symbolicLbOfLb[i] = _eliminatedNeurons[i];
+            _symbolicUbOfLb[i] = _eliminatedNeurons[i];
+            _symbolicLbOfUb[i] = _eliminatedNeurons[i];
+            _symbolicUbOfUb[i] = _eliminatedNeurons[i];
+        }
+    }
+
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+            continue;
+
+        ASSERT( _neuronToActivationSources.exists( i ) );
+        List<NeuronIndex> sources = getActivationSources( i );
+        ASSERT( sources.size() == 2 );
+
+        const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer );
+
+        unsigned sourceLayerSize = sourceLayer->getSize();
+        const double *sourceSymbolicLb = sourceLayer->getSymbolicLb();
+        const double *sourceSymbolicUb = sourceLayer->getSymbolicUb();
+
+        Vector<double> sourceLbs;
+        Vector<double> sourceUbs;
+        Vector<double> sourceValues;
+        bool allConstant = true;
+        unsigned indexA = 0;
+        unsigned indexB = 0;
+        unsigned counter = 0;
+        for ( const auto &sourceIndex : sources )
+        {
+            unsigned sourceNeuron = sourceIndex._neuron;
+            double sourceLb = sourceLayer->getLb( sourceNeuron );
+            double sourceUb = sourceLayer->getUb( sourceNeuron );
+
+            sourceLbs.append( sourceLb );
+            sourceUbs.append( sourceUb );
+
+            if ( !sourceLayer->neuronEliminated( sourceNeuron ) )
+            {
+                allConstant = false;
+            }
+            else
+            {
+                double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron );
+                sourceValues.append( sourceValue );
+            }
+
+            if ( counter == 0 )
+            {
+                indexA = sourceIndex._neuron;
+            }
+            else
+            {
+                indexB = sourceIndex._neuron;
+            }
+            ++counter;
+        }
+
+        if ( allConstant )
+        {
+            // If both source neurons have been eliminated, this neuron is constant
+            for ( unsigned j = 0; j < _inputLayerSize; ++j )
+            {
+                _symbolicUb[j * _size + i] = 0;
+                _symbolicLb[j * _size + i] = 0;
+            }
+
+            _symbolicUpperBias[i] = sourceValues[0] * sourceValues[1];
+            _symbolicLowerBias[i] = sourceValues[0] * sourceValues[1];
+            continue;
+        }
+
+        for ( unsigned j = 0; j < _inputLayerSize; ++j )
+        {
+            _symbolicUb[j * _size + i] = 0;
+            _symbolicLb[j * _size + i] = 0;
+        }
+
+        // Symbolic lower bound:
+        // out >= alpha * x + beta * y + gamma
+        // where alpha = lb_y, beta = lb_x, gamma = -lb_x * lb_y
+
+        // Symbolic upper bound:
+        // out <= alpha * x + beta * y + gamma
+        // where alpha = ub_y, beta = lb_x, gamma = -lb_x * ub_y
+        for ( unsigned j = 0; j < _inputLayerSize; ++j )
+        {
+            if ( sourceLbs[1] >= 0 )
+            {
+                _symbolicLb[j * _size + i] +=
+                    sourceLbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA];
+            }
+            else
+            {
+                _symbolicLb[j * _size + i] +=
+                    sourceLbs[1] * sourceSymbolicUb[j * sourceLayerSize + indexA];
+            }
+
+            if ( sourceUbs[1] >= 0 )
+            {
+                _symbolicUb[j * _size + i] +=
+                    sourceUbs[1] * sourceSymbolicUb[j * sourceLayerSize + indexA];
+            }
+            else
+            {
+                _symbolicUb[j * _size + i] +=
+                    sourceUbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA];
+            }
+
+            if ( sourceLbs[0] >= 0 )
+            {
+                _symbolicLb[j * _size + i] +=
+                    sourceLbs[0] * sourceSymbolicLb[j * sourceLayerSize + indexB];
+                _symbolicUb[j * _size + i] +=
+                    sourceLbs[0] * sourceSymbolicUb[j * sourceLayerSize + indexB];
+            }
+            else
+            {
+                _symbolicLb[j * _size + i] +=
+                    sourceLbs[0] * sourceSymbolicUb[j * sourceLayerSize + indexB];
+                _symbolicUb[j * _size + i] +=
+                    sourceLbs[0] * sourceSymbolicLb[j * sourceLayerSize + indexB];
+            }
+        }
+        _symbolicLowerBias[i] = -sourceLbs[0] * sourceLbs[1];
+        _symbolicUpperBias[i] = -sourceLbs[0] * sourceUbs[1];
+
+        double lb = FloatUtils::infinity();
+        double ub = FloatUtils::negativeInfinity();
+        List<double> values = { sourceLbs[0] * sourceLbs[1],
+                                sourceLbs[0] * sourceUbs[1],
+                                sourceUbs[0] * sourceLbs[1],
+                                sourceUbs[0] * sourceUbs[1] };
+        for ( const auto &v : values )
+        {
+            if ( v < lb )
+                lb = v;
+            if ( v > ub )
+                ub = v;
+        }
+
+        /*
+          We now have the symbolic representation for the current
+          layer. Next, we compute new lower and upper bounds for
+          it. For each of these bounds, we compute an upper bound and
+          a lower bound.
+        */
+        _symbolicLbOfLb[i] = _symbolicLowerBias[i];
+        _symbolicUbOfLb[i] = _symbolicLowerBias[i];
+        _symbolicLbOfUb[i] = _symbolicUpperBias[i];
+        _symbolicUbOfUb[i] = _symbolicUpperBias[i];
+
+        for ( unsigned j = 0; j < _inputLayerSize; ++j )
+        {
+            double inputLb = _layerOwner->getLayer( 0 )->getLb( j );
+            double inputUb = _layerOwner->getLayer( 0 )->getUb( j );
+
+            double entry = _symbolicLb[j * _size + i];
+
+            if ( entry >= 0 )
+            {
+                _symbolicLbOfLb[i] += ( entry * inputLb );
+                _symbolicUbOfLb[i] += ( entry * inputUb );
+            }
+            else
+            {
+                _symbolicLbOfLb[i] += ( entry * inputUb );
+                _symbolicUbOfLb[i] += ( entry * inputLb );
+            }
+
+            entry = _symbolicUb[j * _size + i];
+
+            if ( entry >= 0 )
+            {
+                _symbolicLbOfUb[i] += ( entry * inputLb );
+                _symbolicUbOfUb[i] += ( entry * inputUb );
+            }
+            else
+            {
+                _symbolicLbOfUb[i] += ( entry * inputUb );
+                _symbolicUbOfUb[i] += ( entry * inputLb );
+            }
+        }
+
+        /*
+          We now have the tightest bounds we can for the bilinear
+          variable. If they are tighter than what was previously
+          known, store them.
+ */ + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } +} + +void Layer::computeSymbolicBoundsForWeightedSum() +{ + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); + std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + { + _symbolicLowerBias[i] = _eliminatedNeurons[i]; + _symbolicUpperBias[i] = _eliminatedNeurons[i]; + + _symbolicLbOfLb[i] = _eliminatedNeurons[i]; + _symbolicUbOfLb[i] = _eliminatedNeurons[i]; + _symbolicLbOfUb[i] = _eliminatedNeurons[i]; + _symbolicUbOfUb[i] = _eliminatedNeurons[i]; + } + else + { + _symbolicLowerBias[i] = _bias[i]; + _symbolicUpperBias[i] = _bias[i]; + } + } + + for ( const auto &sourceLayerEntry : _sourceLayers ) + { + unsigned sourceLayerIndex = sourceLayerEntry.first; + unsigned sourceLayerSize = sourceLayerEntry.second; + const Layer *sourceLayer = _layerOwner->getLayer( sourceLayerEntry.first ); + + /* + Perform the multiplication + + newUB = oldUB * posWeights + oldLB * negWeights + newLB = oldUB * negWeights + oldLB * posWeights + */ + + matrixMultiplication( sourceLayer->getSymbolicUb(), + _layerToPositiveWeights[sourceLayerIndex], + _symbolicUb, + _inputLayerSize, + sourceLayerSize, + _size ); + matrixMultiplication( sourceLayer->getSymbolicLb(), + _layerToNegativeWeights[sourceLayerIndex], + _symbolicUb, + _inputLayerSize, + sourceLayerSize, + _size ); + matrixMultiplication( sourceLayer->getSymbolicLb(), + _layerToPositiveWeights[sourceLayerIndex], + _symbolicLb, + _inputLayerSize, + sourceLayerSize, + _size ); + matrixMultiplication( sourceLayer->getSymbolicUb(), + _layerToNegativeWeights[sourceLayerIndex], + _symbolicLb, + _inputLayerSize, + sourceLayerSize, + _size ); + + // Restore the zero bound on eliminated neurons + unsigned index; + for ( const auto &eliminated : _eliminatedNeurons ) + { + for ( unsigned i = 0; i < _inputLayerSize; ++i ) + { + index = i * _size + eliminated.first; + _symbolicLb[index] = 0; + _symbolicUb[index] = 0; + } + } + + /* + Compute the biases for the new layer + */ + for ( unsigned j = 0; j < _size; ++j ) + { + if ( _eliminatedNeurons.exists( j ) ) + continue; + + // Add the weighted bias from the source layer + for ( unsigned k = 0; k < sourceLayerSize; ++k ) + { + double weight = _layerToWeights[sourceLayerIndex][k * _size + j]; + + if ( weight > 0 ) + { + _symbolicLowerBias[j] += sourceLayer->getSymbolicLowerBias()[k] * weight; + _symbolicUpperBias[j] += sourceLayer->getSymbolicUpperBias()[k] * weight; + } + else + { + _symbolicLowerBias[j] += sourceLayer->getSymbolicUpperBias()[k] * weight; + _symbolicUpperBias[j] += sourceLayer->getSymbolicLowerBias()[k] * weight; + } + } + } + } + + /* + We now have the symbolic representation for the current + layer. Next, we compute new lower and upper bounds for + it. For each of these bounds, we compute an upper bound and + a lower bound. 
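+
+      For example (illustrative numbers): a symbolic row of 2 * x0 - x1
+      with bias 0, over inputs x0 in [0, 1] and x1 in [-1, 1],
+      concretizes to the lower bound 2 * 0 - 1 = -1 and the upper bound
+      2 * 1 + 1 = 3.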
+
+    */
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+            continue;
+
+        _symbolicLbOfLb[i] = _symbolicLowerBias[i];
+        _symbolicUbOfLb[i] = _symbolicLowerBias[i];
+        _symbolicLbOfUb[i] = _symbolicUpperBias[i];
+        _symbolicUbOfUb[i] = _symbolicUpperBias[i];
+
+        for ( unsigned j = 0; j < _inputLayerSize; ++j )
+        {
+            double inputLb = _layerOwner->getLayer( 0 )->getLb( j );
+            double inputUb = _layerOwner->getLayer( 0 )->getUb( j );
+
+            double entry = _symbolicLb[j * _size + i];
+
+            if ( entry >= 0 )
+            {
+                _symbolicLbOfLb[i] += ( entry * inputLb );
+                _symbolicUbOfLb[i] += ( entry * inputUb );
+            }
+            else
+            {
+                _symbolicLbOfLb[i] += ( entry * inputUb );
+                _symbolicUbOfLb[i] += ( entry * inputLb );
+            }
+
+            entry = _symbolicUb[j * _size + i];
+
+            if ( entry >= 0 )
+            {
+                _symbolicLbOfUb[i] += ( entry * inputLb );
+                _symbolicUbOfUb[i] += ( entry * inputUb );
+            }
+            else
+            {
+                _symbolicLbOfUb[i] += ( entry * inputUb );
+                _symbolicUbOfUb[i] += ( entry * inputLb );
+            }
+        }
+
+        /*
+          We now have the tightest bounds we can for the
+          weighted sum variable. If they are tighter than
+          what was previously known, store them.
+        */
+        if ( _lb[i] < _symbolicLbOfLb[i] )
+        {
+            _lb[i] = _symbolicLbOfLb[i];
+            _layerOwner->receiveTighterBound(
+                Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
+        }
+
+        if ( _ub[i] > _symbolicUbOfUb[i] )
+        {
+            _ub[i] = _symbolicUbOfUb[i];
+            _layerOwner->receiveTighterBound(
+                Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
+        }
+    }
+}
+
+void Layer::computeParameterisedSymbolicBounds( const Vector<double> &coeffs, bool receive )
+{
+    switch ( _type )
+    {
+    case RELU:
+        computeParameterisedSymbolicBoundsForRelu( coeffs, receive );
+        break;
+
+    case SIGN:
+        computeParameterisedSymbolicBoundsForSign( coeffs, receive );
+        break;
+
+    case LEAKY_RELU:
+        computeParameterisedSymbolicBoundsForLeakyRelu( coeffs, receive );
+        break;
+
+    case BILINEAR:
+        computeParameterisedSymbolicBoundsForBilinear( coeffs, receive );
+        break;
+
+    default:
+        computeSymbolicBounds();
+        break;
+    }
+}
+
+void Layer::computeParameterisedSymbolicBoundsForRelu( const Vector<double> &coeffs, bool receive )
+{
+    ASSERT( coeffs.size() == 1 );
+
+    double coeff = coeffs[0];
+    ASSERT( coeff >= 0 && coeff <= 1 );
+
+    std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 );
+    std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 );
+
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+        {
+            _symbolicLowerBias[i] = _eliminatedNeurons[i];
+            _symbolicUpperBias[i] = _eliminatedNeurons[i];
+
+            _symbolicLbOfLb[i] = _eliminatedNeurons[i];
+            _symbolicUbOfLb[i] = _eliminatedNeurons[i];
+            _symbolicLbOfUb[i] = _eliminatedNeurons[i];
+            _symbolicUbOfUb[i] = _eliminatedNeurons[i];
+        }
+    }
+
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+            continue;
+
+        /*
+          There are two ways we can determine that a ReLU has become fixed:
+
+          1. If the ReLU's variable has been externally fixed
+          2. lbLb >= 0 (ACTIVE) or ubUb <= 0 (INACTIVE)
+        */
+        PhaseStatus reluPhase = PHASE_NOT_FIXED;
+
+        // Has the f variable been eliminated or fixed?
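+        // (coeff enters only through the lower-bound relaxation chosen
+        // below when the phase is not fixed: coeff = 0 reproduces the
+        // constant-zero lower bound, coeff = 1 keeps the source's symbolic
+        // lower bound unchanged, and intermediate values interpolate.)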
+
+        if ( FloatUtils::isPositive( _lb[i] ) )
+            reluPhase = RELU_PHASE_ACTIVE;
+        else if ( FloatUtils::isZero( _ub[i] ) )
+            reluPhase = RELU_PHASE_INACTIVE;
+
+        ASSERT( _neuronToActivationSources.exists( i ) );
+        NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin();
+        const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
+
+        /*
+          A ReLU initially "inherits" the symbolic bounds computed
+          for its input variable
+        */
+        unsigned sourceLayerSize = sourceLayer->getSize();
+        const double *sourceSymbolicLb = sourceLayer->getSymbolicLb();
+        const double *sourceSymbolicUb = sourceLayer->getSymbolicUb();
+
+        for ( unsigned j = 0; j < _inputLayerSize; ++j )
+        {
+            _symbolicLb[j * _size + i] =
+                sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron];
+            _symbolicUb[j * _size + i] =
+                sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron];
+        }
+        _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron];
+        _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron];
+
+        double sourceLb = sourceLayer->getLb( sourceIndex._neuron );
+        double sourceUb = sourceLayer->getUb( sourceIndex._neuron );
+
+        _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron );
+        _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron );
+        _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron );
+        _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron );
+
+        // Has the b variable been fixed?
+        if ( !FloatUtils::isNegative( sourceLb ) )
+        {
+            reluPhase = RELU_PHASE_ACTIVE;
+        }
+        else if ( !FloatUtils::isPositive( sourceUb ) )
+        {
+            reluPhase = RELU_PHASE_INACTIVE;
+        }
+
+        if ( reluPhase == PHASE_NOT_FIXED )
+        {
+            // If we got here, we know that lbLb < 0 and ubUb > 0.
+            // There are four possible cases, depending on whether ubLb and
+            // lbUb are negative or positive (see the Neurify paper, page 14).
+
+            // Upper bound
+            if ( _symbolicLbOfUb[i] <= 0 )
+            {
+                // lbOfUb[i] < 0 < ubOfUb[i]
+                // Concretize the upper bound using the Ehlers-like approximation
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                    _symbolicUb[j * _size + i] = _symbolicUb[j * _size + i] * _symbolicUbOfUb[i] /
+                                                 ( _symbolicUbOfUb[i] - _symbolicLbOfUb[i] );
+
+                // Do the same for the bias, and then adjust
+                _symbolicUpperBias[i] = _symbolicUpperBias[i] * _symbolicUbOfUb[i] /
+                                        ( _symbolicUbOfUb[i] - _symbolicLbOfUb[i] );
+                _symbolicUpperBias[i] -= _symbolicLbOfUb[i] * _symbolicUbOfUb[i] /
+                                         ( _symbolicUbOfUb[i] - _symbolicLbOfUb[i] );
+            }
+
+            // Lower bound: y >= coeff * x (varies continuously between
+            // y >= 0 and y >= x).
+            if ( _symbolicUbOfLb[i] <= 0 )
+            {
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                    _symbolicLb[j * _size + i] *= coeff;
+
+                _symbolicLowerBias[i] *= coeff;
+            }
+            else
+            {
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                    _symbolicLb[j * _size + i] = _symbolicLb[j * _size + i] * _symbolicUbOfLb[i] /
+                                                 ( _symbolicUbOfLb[i] - _symbolicLbOfLb[i] );
+
+                _symbolicLowerBias[i] = _symbolicLowerBias[i] * _symbolicUbOfLb[i] /
+                                        ( _symbolicUbOfLb[i] - _symbolicLbOfLb[i] );
+            }
+
+            _symbolicLbOfLb[i] = 0;
+        }
+        else
+        {
+            // The phase of this ReLU is fixed!
+
+            if ( reluPhase == RELU_PHASE_ACTIVE )
+            {
+                // Active ReLU, bounds are propagated as is
+            }
+            else
+            {
+                // Inactive ReLU, returns zero
+                _symbolicLbOfLb[i] = 0;
+                _symbolicUbOfLb[i] = 0;
+                _symbolicLbOfUb[i] = 0;
+                _symbolicUbOfUb[i] = 0;
+
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                {
+                    _symbolicUb[j * _size + i] = 0;
+                    _symbolicLb[j * _size + i] = 0;
+                }
+
+                _symbolicLowerBias[i] = 0;
+                _symbolicUpperBias[i] = 0;
+            }
+        }
+
+        if ( _symbolicLbOfUb[i] < 0 )
+            _symbolicLbOfUb[i] = 0;
+
+        /*
+          We now have the tightest bounds we can for the relu
+          variable. If they are tighter than what was previously
+          known, store them.
+        */
+        if ( receive )
+        {
+            if ( _lb[i] < _symbolicLbOfLb[i] )
+            {
+                _lb[i] = _symbolicLbOfLb[i];
+                _layerOwner->receiveTighterBound(
+                    Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
+            }
+
+            if ( _ub[i] > _symbolicUbOfUb[i] )
+            {
+                _ub[i] = _symbolicUbOfUb[i];
+                _layerOwner->receiveTighterBound(
+                    Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
+            }
+        }
+    }
+}
+
+void Layer::computeParameterisedSymbolicBoundsForSign( const Vector<double> &coeffs, bool receive )
+{
+    ASSERT( coeffs.size() == 2 );
+    ASSERT( coeffs[0] >= 0 && coeffs[0] <= 1 );
+    ASSERT( coeffs[1] >= 0 && coeffs[1] <= 1 );
+
+    std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 );
+    std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 );
+
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        // Eliminated neurons are skipped
+        if ( _eliminatedNeurons.exists( i ) )
+        {
+            _symbolicLowerBias[i] = _eliminatedNeurons[i];
+            _symbolicUpperBias[i] = _eliminatedNeurons[i];
+
+            _symbolicLbOfLb[i] = _eliminatedNeurons[i];
+            _symbolicUbOfLb[i] = _eliminatedNeurons[i];
+            _symbolicLbOfUb[i] = _eliminatedNeurons[i];
+            _symbolicUbOfUb[i] = _eliminatedNeurons[i];
+
+            continue;
+        }
+
+        /*
+          There are two ways we can determine that a Sign has become fixed:
+
+          1. If the Sign's variable has been externally fixed
+          2. lbLb >= 0 (Positive) or ubUb < 0 (Negative)
+        */
+        PhaseStatus signPhase = PHASE_NOT_FIXED;
+
+        // Has the f variable been eliminated or fixed?
+ if ( !FloatUtils::isNegative( _lb[i] ) ) + signPhase = SIGN_PHASE_POSITIVE; + else if ( FloatUtils::isNegative( _ub[i] ) ) + signPhase = SIGN_PHASE_NEGATIVE; + + ASSERT( _neuronToActivationSources.exists( i ) ); + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + + /* + A Sign initially "inherits" the symbolic bounds computed + for its input variable + */ + unsigned sourceLayerSize = sourceLayer->getSize(); + const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); + const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] = + sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron]; + _symbolicUb[j * _size + i] = + sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron]; + } + _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron]; + _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron]; + + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); + + _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron ); + _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); + _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); + _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); + + // Has the b variable been fixed? + if ( !FloatUtils::isNegative( sourceLb ) ) + { + signPhase = SIGN_PHASE_POSITIVE; + } + else if ( FloatUtils::isNegative( sourceUb ) ) + { + signPhase = SIGN_PHASE_NEGATIVE; + } + + if ( signPhase == PHASE_NOT_FIXED ) + { + PhaseStatus upperSignPhase = PHASE_NOT_FIXED; + PhaseStatus lowerSignPhase = PHASE_NOT_FIXED; + + // If we got here, we know that lbLb < 0 and ubUb + // > 0 + + // Upper bound + if ( !FloatUtils::isNegative( _symbolicLbOfUb[i] ) ) + { + // The upper bound is strictly positive - turns into + // the constant 1 + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + _symbolicUb[j * _size + i] = 0; + + _symbolicUpperBias[i] = 1; + + upperSignPhase = SIGN_PHASE_POSITIVE; + } + else + { + // The upper bound's phase is not fixed, use parameterised + // parallelogram approximation: y <= - 2 / l * coeffs[0] * x + 1 + // (varies continuously between y <= 1 and y <= -2 / l * x + 1). + double factor = -2.0 / _symbolicLbOfLb[i] * coeffs[0]; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + _symbolicUb[j * _size + i] *= factor; + + // Do the same for the bias, and then adjust + _symbolicUpperBias[i] *= factor; + _symbolicUpperBias[i] += 1; + } + + // Lower bound + if ( FloatUtils::isNegative( _symbolicUbOfLb[i] ) ) + { + // The lower bound is strictly negative - turns into + // the constant -1 + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + _symbolicLb[j * _size + i] = 0; + + _symbolicLowerBias[i] = -1; + + lowerSignPhase = SIGN_PHASE_NEGATIVE; + } + else + { + // The lower bound's phase is not fixed, use parameterised + // parallelogram approximation: y >= 2 / u * coeffs[1] * x - 1 + // (varies continuously between y >= -1 and y >= 2 / u * x - 1). 
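+                // (For example, with illustrative numbers u = 4 and
+                // coeffs[1] = 0.5, the lower bound becomes
+                // y >= ( 2 / 4 ) * 0.5 * x - 1 = 0.25 * x - 1, halfway
+                // between the constant -1 and the full parallelogram slope
+                // of 0.5.)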
+                double factor = 2.0 / _symbolicUbOfUb[i] * coeffs[1];
+
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                {
+                    _symbolicLb[j * _size + i] *= factor;
+                }
+
+                // Do the same for the bias, and then adjust
+                _symbolicLowerBias[i] *= factor;
+                _symbolicLowerBias[i] -= 1;
+            }
+
+            if ( upperSignPhase == PHASE_NOT_FIXED )
+            {
+                _symbolicUbOfUb[i] = 1;
+                _symbolicLbOfUb[i] = -1;
+            }
+            else
+            {
+                _symbolicUbOfUb[i] = 1;
+                _symbolicLbOfUb[i] = 1;
+            }
+
+            if ( lowerSignPhase == PHASE_NOT_FIXED )
+            {
+                _symbolicUbOfLb[i] = 1;
+                _symbolicLbOfLb[i] = -1;
+            }
+            else
+            {
+                _symbolicUbOfLb[i] = -1;
+                _symbolicLbOfLb[i] = -1;
+            }
+        }
+        else
+        {
+            // The phase of this Sign is fixed!
+            double constant = ( signPhase == SIGN_PHASE_POSITIVE ) ? 1 : -1;
+
+            _symbolicLbOfLb[i] = constant;
+            _symbolicUbOfLb[i] = constant;
+            _symbolicLbOfUb[i] = constant;
+            _symbolicUbOfUb[i] = constant;
+
+            for ( unsigned j = 0; j < _inputLayerSize; ++j )
+            {
+                _symbolicUb[j * _size + i] = 0;
+                _symbolicLb[j * _size + i] = 0;
+            }
+
+            _symbolicLowerBias[i] = constant;
+            _symbolicUpperBias[i] = constant;
+        }
+
+        if ( _symbolicLbOfLb[i] < -1 )
+            _symbolicLbOfLb[i] = -1;
+        if ( _symbolicUbOfUb[i] > 1 )
+            _symbolicUbOfUb[i] = 1;
+
+        /*
+            We now have the tightest bounds we can for the sign
+            variable. If they are tighter than what was previously
+            known, store them.
+        */
+        if ( receive )
+        {
+            if ( _lb[i] < _symbolicLbOfLb[i] )
+            {
+                _lb[i] = _symbolicLbOfLb[i];
+                _layerOwner->receiveTighterBound(
+                    Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
+            }
+
+            if ( _ub[i] > _symbolicUbOfUb[i] )
+            {
+                _ub[i] = _symbolicUbOfUb[i];
+                _layerOwner->receiveTighterBound(
+                    Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
+            }
+        }
+    }
+}
+
+void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( const Vector<double> &coeffs,
+                                                            bool receive )
+{
+    ASSERT( _alpha > 0 && _alpha < 1 );
+    ASSERT( coeffs.size() == 1 );
+    double coeff = coeffs[0];
+    ASSERT( coeff >= 0 && coeff <= 1 );
+
+    std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 );
+    std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 );
+
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+        {
+            _symbolicLowerBias[i] = _eliminatedNeurons[i];
+            _symbolicUpperBias[i] = _eliminatedNeurons[i];
+
+            _symbolicLbOfLb[i] = _eliminatedNeurons[i];
+            _symbolicUbOfLb[i] = _eliminatedNeurons[i];
+            _symbolicLbOfUb[i] = _eliminatedNeurons[i];
+            _symbolicUbOfUb[i] = _eliminatedNeurons[i];
+        }
+    }
+
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+            continue;
+
+        /*
+            There are two ways we can determine that a LeakyReLU has become fixed:
+
+            1. If the LeakyReLU's variable has been externally fixed
+            2. lbLb >= 0 (ACTIVE) or ubUb <= 0 (INACTIVE)
+        */
+        PhaseStatus leakyReluPhase = PHASE_NOT_FIXED;
+
+        // Has the f variable been eliminated or fixed?
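+        // As in the ReLU case, a strictly positive lower bound on f fixes the
+        // phase to active, and an upper bound of zero fixes it to inactive;
+        // the checks on the source (b) variable below cover the remaining cases.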
+ if ( FloatUtils::isPositive( _lb[i] ) ) + leakyReluPhase = RELU_PHASE_ACTIVE; + else if ( FloatUtils::isZero( _ub[i] ) ) + leakyReluPhase = RELU_PHASE_INACTIVE; + + ASSERT( _neuronToActivationSources.exists( i ) ); + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + + /* + A LeakyReLU initially "inherits" the symbolic bounds computed + for its input variable + */ + unsigned sourceLayerSize = sourceLayer->getSize(); + const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); + const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] = + sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron]; + _symbolicUb[j * _size + i] = + sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron]; + } + _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron]; + _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron]; + + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); + + _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron ); + _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); + _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); + _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); + + // Has the b variable been fixed? + if ( !FloatUtils::isNegative( sourceLb ) ) + { + leakyReluPhase = RELU_PHASE_ACTIVE; + } + else if ( !FloatUtils::isPositive( sourceUb ) ) + { + leakyReluPhase = RELU_PHASE_INACTIVE; + } + + if ( leakyReluPhase == PHASE_NOT_FIXED ) + { + // LeakyReLU not fixed + // Symbolic upper bound: x_f <= (x_b - l) * u / ( u - l) + // Concrete upper bound: x_f <= ub_b + double width = sourceUb - sourceLb; + double weight = ( sourceUb - _alpha * sourceLb ) / width; + + if ( _alpha <= 1 ) + { + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] *= weight; + } + + // Do the same for the bias, and then adjust + _symbolicUpperBias[i] *= weight; + _symbolicUpperBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; + + + // For the lower bound, in general, x_f >= lambda * x_b, where + // 0 <= lambda <= 1, would be a sound lower bound. We + // use the heuristic described in section 4.1 of + // https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + // to set the value of lambda (either 0 or 1 is considered). + + // lambda = ((1 - alpha) * coeff + alpha) (varies continuously between lambda = + // alpha and lambda = 1). Symbolic lower bound: x_f >= ((1 - alpha) * coeff + + // alpha) x_b Concrete lower bound: x_f >= 0 + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] *= ( 1 - _alpha ) * coeff + _alpha; + } + + _symbolicLowerBias[i] *= ( 1 - _alpha ) * coeff + _alpha; + } + + else + { + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] *= weight; + } + + // Do the same for the bias, and then adjust + _symbolicLowerBias[i] *= weight; + _symbolicLowerBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; + + // lambda = ((1 - alpha) * coeff + alpha) (varies continuously between lambda = + // alpha and lambda = 1). 
Here, with alpha > 1, the
+                // roles flip: the line y = lambda * x_b upper-bounds the function,
+                // so the symbolic upper bound is x_f <= ((1 - alpha) * coeff + alpha) * x_b.
+
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                {
+                    _symbolicUb[j * _size + i] *= ( 1 - _alpha ) * coeff + _alpha;
+                }
+
+                _symbolicUpperBias[i] *= ( 1 - _alpha ) * coeff + _alpha;
+            }
+
+            /*
+                We now have the symbolic representation for the current
+                layer. Next, we compute new lower and upper bounds for
+                it. For each of these bounds, we compute an upper bound and
+                a lower bound.
+            */
+            _symbolicLbOfLb[i] = _symbolicLowerBias[i];
+            _symbolicUbOfLb[i] = _symbolicLowerBias[i];
+            _symbolicLbOfUb[i] = _symbolicUpperBias[i];
+            _symbolicUbOfUb[i] = _symbolicUpperBias[i];
+
+            for ( unsigned j = 0; j < _inputLayerSize; ++j )
+            {
+                double inputLb = _layerOwner->getLayer( 0 )->getLb( j );
+                double inputUb = _layerOwner->getLayer( 0 )->getUb( j );
+
+                double entry = _symbolicLb[j * _size + i];
+
+                if ( entry >= 0 )
+                {
+                    _symbolicLbOfLb[i] += ( entry * inputLb );
+                    _symbolicUbOfLb[i] += ( entry * inputUb );
+                }
+                else
+                {
+                    _symbolicLbOfLb[i] += ( entry * inputUb );
+                    _symbolicUbOfLb[i] += ( entry * inputLb );
+                }
+
+                entry = _symbolicUb[j * _size + i];
+
+                if ( entry >= 0 )
+                {
+                    _symbolicLbOfUb[i] += ( entry * inputLb );
+                    _symbolicUbOfUb[i] += ( entry * inputUb );
+                }
+                else
+                {
+                    _symbolicLbOfUb[i] += ( entry * inputUb );
+                    _symbolicUbOfUb[i] += ( entry * inputLb );
+                }
+            }
+        }
+        else
+        {
+            // The phase of this LeakyReLU is fixed!
+            if ( leakyReluPhase == RELU_PHASE_ACTIVE )
+            {
+                // Positive LeakyReLU, bounds are propagated as is
+            }
+            else
+            {
+                // Negative LeakyReLU, bounds are multiplied by _alpha
+                _symbolicLbOfLb[i] *= _alpha;
+                _symbolicUbOfLb[i] *= _alpha;
+                _symbolicLbOfUb[i] *= _alpha;
+                _symbolicUbOfUb[i] *= _alpha;
+
+                for ( unsigned j = 0; j < _inputLayerSize; ++j )
+                {
+                    _symbolicUb[j * _size + i] *= _alpha;
+                    _symbolicLb[j * _size + i] *= _alpha;
+                }
+
+                _symbolicLowerBias[i] *= _alpha;
+                _symbolicUpperBias[i] *= _alpha;
+            }
+        }
+
+        if ( _symbolicUbOfUb[i] > sourceUb )
+            _symbolicUbOfUb[i] = sourceUb;
+        if ( _symbolicLbOfLb[i] < _alpha * sourceLb )
+            _symbolicLbOfLb[i] = _alpha * sourceLb;
+
+        /*
+            We now have the tightest bounds we can for the leakyRelu
+            variable. If they are tighter than what was previously
+            known, store them.
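+
+            For example (illustrative numbers): if the stored bounds were
+            [-1, 5] and this pass derived [-0.2, 4.2], both bounds are reported
+            to _layerOwner as Tightening objects; a looser derived bound is
+            simply ignored.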
+        */
+        if ( receive )
+        {
+            if ( _lb[i] < _symbolicLbOfLb[i] )
+            {
+                _lb[i] = _symbolicLbOfLb[i];
+                _layerOwner->receiveTighterBound(
+                    Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
+            }
+
+            if ( _ub[i] > _symbolicUbOfUb[i] )
+            {
+                _ub[i] = _symbolicUbOfUb[i];
+                _layerOwner->receiveTighterBound(
+                    Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
+            }
+        }
+    }
+}
+
+void Layer::computeParameterisedSymbolicBoundsForBilinear( const Vector<double> &coeffs,
+                                                           bool receive )
+{
+    ASSERT( coeffs.size() == 2 );
+    ASSERT( coeffs[0] >= 0 && coeffs[0] <= 1 );
+    ASSERT( coeffs[1] >= 0 && coeffs[1] <= 1 );
+
+    std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 );
+    std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 );
+
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+        {
+            _symbolicLowerBias[i] = _eliminatedNeurons[i];
+            _symbolicUpperBias[i] = _eliminatedNeurons[i];
+
+            _symbolicLbOfLb[i] = _eliminatedNeurons[i];
+            _symbolicUbOfLb[i] = _eliminatedNeurons[i];
+            _symbolicLbOfUb[i] = _eliminatedNeurons[i];
+            _symbolicUbOfUb[i] = _eliminatedNeurons[i];
+        }
+    }
+
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( _eliminatedNeurons.exists( i ) )
+            continue;
+
+        ASSERT( _neuronToActivationSources.exists( i ) );
+        List<NeuronIndex> sources = getActivationSources( i );
+        ASSERT( sources.size() == 2 );
+
+        const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer );
+
+        unsigned sourceLayerSize = sourceLayer->getSize();
+        const double *sourceSymbolicLb = sourceLayer->getSymbolicLb();
+        const double *sourceSymbolicUb = sourceLayer->getSymbolicUb();
+
+        Vector<double> sourceLbs;
+        Vector<double> sourceUbs;
+        Vector<double> sourceValues;
+        bool allConstant = true;
+        unsigned indexA = 0;
+        unsigned indexB = 0;
+        unsigned counter = 0;
+        for ( const auto &sourceIndex : sources )
+        {
+            unsigned sourceNeuron = sourceIndex._neuron;
+            double sourceLb = sourceLayer->getLb( sourceNeuron );
+            double sourceUb = sourceLayer->getUb( sourceNeuron );
+
+            sourceLbs.append( sourceLb );
+            sourceUbs.append( sourceUb );
+
+            if ( !sourceLayer->neuronEliminated( sourceNeuron ) )
+            {
+                allConstant = false;
+            }
+            else
+            {
+                double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron );
+                sourceValues.append( sourceValue );
+            }
+
+            if ( counter == 0 )
+            {
+                indexA = sourceIndex._neuron;
+            }
+            else
+            {
+                indexB = sourceIndex._neuron;
+            }
+            ++counter;
+        }
+
+        if ( allConstant )
+        {
+            // If both source neurons have been eliminated, this neuron is constant
+            for ( unsigned j = 0; j < _inputLayerSize; ++j )
+            {
+                _symbolicUb[j * _size + i] = 0;
+                _symbolicLb[j * _size + i] = 0;
+            }
+
+            _symbolicUpperBias[i] = sourceValues[0] * sourceValues[1];
+            _symbolicLowerBias[i] = sourceValues[0] * sourceValues[1];
+            continue;
+        }
+
+        for ( unsigned j = 0; j < _inputLayerSize; ++j )
+        {
+            _symbolicUb[j * _size + i] = 0;
+            _symbolicLb[j * _size + i] = 0;
+        }
+
+        // Bilinear linear relaxation (arXiv:2405.21063v2 [cs.LG]), with
+        // alpha1 = coeffs[0] and alpha2 = coeffs[1].
+        // Lower bound: out >= aLower * x + bLower * y + cLower, where
+        // aLower = alpha1 * l_y + ( 1 - alpha1 ) * u_y
+        // bLower = alpha1 * l_x + ( 1 - alpha1 ) * u_x
+        // cLower = -alpha1 * l_x * l_y - ( 1 - alpha1 ) * u_x * u_y
+
+        // Upper bound: out <= aUpper * x + bUpper * y + cUpper, where
+        // aUpper = alpha2 * u_y + ( 1 - alpha2 ) * l_y
+        // bUpper = alpha2 * l_x + ( 1 - alpha2 ) * u_x
+        // cUpper = -alpha2 * l_x * u_y - ( 1 - alpha2 ) * u_x * l_y
+
+        double aLower = coeffs[0] * sourceLbs[1] + ( 1 - coeffs[0] ) * sourceUbs[1];
+        double aUpper = coeffs[1] * sourceUbs[1] + ( 1 - coeffs[1] ) * sourceLbs[1];
+        double bLower = coeffs[0] * sourceLbs[0] + ( 1 - coeffs[0] ) * sourceUbs[0];
+        double bUpper = coeffs[1] * sourceLbs[0] + ( 1 - coeffs[1] ) * sourceUbs[0];
+
+        for ( unsigned j = 0; j < _inputLayerSize; ++j )
+        {
+            if ( aLower >= 0 )
+            {
+                _symbolicLb[j * _size + i] +=
+                    aLower * sourceSymbolicLb[j * sourceLayerSize + indexA];
+            }
+            else
+            {
+                _symbolicLb[j * _size + i] +=
+                    aLower * sourceSymbolicUb[j * sourceLayerSize + indexA];
+            }
+
+            if ( aUpper >= 0 )
+            {
+                _symbolicUb[j * _size + i] +=
+                    aUpper * sourceSymbolicUb[j * sourceLayerSize + indexA];
+            }
+            else
+            {
+                _symbolicUb[j * _size + i] +=
+                    aUpper * sourceSymbolicLb[j * sourceLayerSize + indexA];
+            }
+
+            if ( bLower >= 0 )
+            {
+                _symbolicLb[j * _size + i] +=
+                    bLower * sourceSymbolicLb[j * sourceLayerSize + indexB];
+            }
+            else
+            {
+                _symbolicLb[j * _size + i] +=
+                    bLower * sourceSymbolicUb[j * sourceLayerSize + indexB];
+            }
+
+            if ( bUpper >= 0 )
+            {
+                _symbolicUb[j * _size + i] +=
+                    bUpper * sourceSymbolicUb[j * sourceLayerSize + indexB];
+            }
+            else
+            {
+                _symbolicUb[j * _size + i] +=
+                    bUpper * sourceSymbolicLb[j * sourceLayerSize + indexB];
+            }
+        }
+
+        _symbolicLowerBias[i] = -coeffs[0] * sourceLbs[0] * sourceLbs[1] -
+                                ( 1 - coeffs[0] ) * sourceUbs[0] * sourceUbs[1];
+        _symbolicUpperBias[i] = -coeffs[1] * sourceLbs[0] * sourceUbs[1] -
+                                ( 1 - coeffs[1] ) * sourceUbs[0] * sourceLbs[1];
+
+        double lb = FloatUtils::infinity();
+        double ub = FloatUtils::negativeInfinity();
+        List<double> values = { sourceLbs[0] * sourceLbs[1],
+                                sourceLbs[0] * sourceUbs[1],
+                                sourceUbs[0] * sourceLbs[1],
+                                sourceUbs[0] * sourceUbs[1] };
+        for ( const auto &v : values )
+        {
+            if ( v < lb )
+                lb = v;
+            if ( v > ub )
+                ub = v;
+        }
+
+        /*
+            We now have the symbolic representation for the current
+            layer. Next, we compute new lower and upper bounds for
+            it. For each of these bounds, we compute an upper bound and
+            a lower bound.
+        */
+        _symbolicLbOfLb[i] = _symbolicLowerBias[i];
+        _symbolicUbOfLb[i] = _symbolicLowerBias[i];
+        _symbolicLbOfUb[i] = _symbolicUpperBias[i];
+        _symbolicUbOfUb[i] = _symbolicUpperBias[i];
+
+        for ( unsigned j = 0; j < _inputLayerSize; ++j )
+        {
+            double inputLb = _layerOwner->getLayer( 0 )->getLb( j );
+            double inputUb = _layerOwner->getLayer( 0 )->getUb( j );
+
+            double entry = _symbolicLb[j * _size + i];
+
+            if ( entry >= 0 )
+            {
+                _symbolicLbOfLb[i] += ( entry * inputLb );
+                _symbolicUbOfLb[i] += ( entry * inputUb );
+            }
+            else
+            {
+                _symbolicLbOfLb[i] += ( entry * inputUb );
+                _symbolicUbOfLb[i] += ( entry * inputLb );
+            }
+
+            entry = _symbolicUb[j * _size + i];
+
+            if ( entry >= 0 )
+            {
+                _symbolicLbOfUb[i] += ( entry * inputLb );
+                _symbolicUbOfUb[i] += ( entry * inputUb );
+            }
+            else
+            {
+                _symbolicLbOfUb[i] += ( entry * inputUb );
+                _symbolicUbOfUb[i] += ( entry * inputLb );
+            }
+        }
+
+        /*
+            We now have the tightest bounds we can for the bilinear
+            variable. If they are tighter than what was previously
+            known, store them.
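+
+            (Soundness note: the parameterised planes above interpolate between
+            the two McCormick lower planes and the two McCormick upper planes
+            for x * y over the box; a convex combination of valid inequalities
+            is itself valid, which is why any coeffs in [0, 1] are sound.)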
+ */ + if ( receive ) + { + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } + } +} + +double Layer::LSELowerBound( const Vector &inputs, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) +{ + double sum = 0; + for ( unsigned j = 0; j < inputs.size(); ++j ) + { + double lj = inputLbs[j]; + double uj = inputUbs[j]; + double xj = inputs[j]; + sum += + ( uj - xj ) / ( uj - lj ) * std::exp( lj ) + ( xj - lj ) / ( uj - lj ) * std::exp( uj ); + } + + return std::exp( inputs[i] ) / sum; +} + +double Layer::dLSELowerBound( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) +{ + double val = 0; + if ( i == di ) + val += LSELowerBound( inputMids, inputLbs, inputUbs, i ); + + double ldi = inputLbs[di]; + double udi = inputUbs[di]; + + double sum = 0; + for ( unsigned j = 0; j < inputMids.size(); ++j ) + { + double lj = inputLbs[j]; + double uj = inputUbs[j]; + double xj = inputMids[j]; + + sum += + ( uj - xj ) / ( uj - lj ) * std::exp( lj ) + ( xj - lj ) / ( uj - lj ) * std::exp( uj ); + } + + val -= std::exp( inputMids[i] ) / ( sum * sum ) * ( std::exp( udi ) - std::exp( ldi ) ) / + ( udi - ldi ); + + return val; +} + +double Layer::LSELowerBound2( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) +{ + double max = FloatUtils::negativeInfinity(); + unsigned maxInputIndex = 0; + unsigned index = 0; + for ( const auto &mid : inputMids ) + { + if ( mid > max ) + { + max = mid; + maxInputIndex = index; + } + ++index; + } + + if ( maxInputIndex == i ) + return ERLowerBound( inputMids, inputLbs, inputUbs, i ); + else + { + double sum = 0; + for ( unsigned j = 0; j < inputMids.size(); ++j ) + { + if ( j == maxInputIndex ) + sum += 1; + else + { + double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; + double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; + double xjjstar = inputMids[j] - inputMids[maxInputIndex]; + + sum += ( ujjstar - xjjstar ) / ( ujjstar - ljjstar ) * std::exp( ljjstar ) + + ( xjjstar - ljjstar ) / ( ujjstar - ljjstar ) * std::exp( ujjstar ); + } + } + + return std::exp( inputMids[i] - inputMids[maxInputIndex] ) / sum; + } +} + +double Layer::dLSELowerBound2( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) +{ + double max = FloatUtils::negativeInfinity(); + unsigned maxInputIndex = 0; + unsigned index = 0; + for ( const auto &mid : inputMids ) + { + if ( mid > max ) + { + max = mid; + maxInputIndex = index; + } + ++index; + } + + if ( maxInputIndex == i ) + return dERLowerBound( inputMids, inputLbs, inputUbs, i, di ); + else + { + double val = LSELowerBound2( inputMids, inputLbs, inputUbs, i ); + + double sum = 0; + for ( unsigned j = 0; j < inputMids.size(); ++j ) + { + if ( j == maxInputIndex ) + sum += 1; + else + { + double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; + double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; + double xjjstar = inputMids[j] - inputMids[maxInputIndex]; + sum += ( ujjstar - xjjstar ) / ( ujjstar - ljjstar ) * std::exp( ljjstar ) + + ( xjjstar - ljjstar ) / ( ujjstar - ljjstar ) * std::exp( ujjstar ); + } + } + double val2 = std::exp( inputMids[i] - inputMids[maxInputIndex] 
) / ( sum * sum ); + + if ( i == di ) + { + double ldijstar = inputLbs[i] - inputUbs[maxInputIndex]; + double udijstar = inputUbs[i] - inputLbs[maxInputIndex]; + return val - + val2 * ( std::exp( udijstar ) - std::exp( ldijstar ) ) / ( udijstar - ldijstar ); + } + else if ( maxInputIndex == di ) + { + double sum2 = 0; + for ( unsigned j = 0; j < inputMids.size(); ++j ) + { + if ( j == maxInputIndex ) + continue; + else + { + double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; + double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; + sum2 += ( std::exp( ujjstar ) - std::exp( ljjstar ) ) / ( ujjstar - ljjstar ); + } + } + return -val + val2 * sum2; + } + else + { + double ldijstar = inputLbs[di] - inputUbs[maxInputIndex]; + double udijstar = inputUbs[di] - inputLbs[maxInputIndex]; + return -val2 * ( std::exp( udijstar ) - std::exp( ldijstar ) ) / + ( udijstar - ldijstar ); + } + } +} + +double Layer::LSEUpperBound( const Vector &inputs, + const Vector &outputLb, + const Vector &outputUb, + unsigned i ) +{ + double li = outputLb[i]; + double ui = outputUb[i]; + + Vector inputTilda; + SoftmaxConstraint::xTilda( inputs, inputs[i], inputTilda ); + + return ( ( li * std::log( ui ) - ui * std::log( li ) ) / ( std::log( ui ) - std::log( li ) ) - + ( ui - li ) / ( std::log( ui ) - std::log( li ) ) * + SoftmaxConstraint::logSumOfExponential( inputTilda ) ); +} + +double Layer::dLSEUpperbound( const Vector &inputMids, + const Vector &outputLb, + const Vector &outputUb, + unsigned i, + unsigned di ) +{ + double li = outputLb[i]; + double ui = outputUb[i]; + + double val = -( ui - li ) / ( std::log( ui ) - std::log( li ) ); + + double val2 = std::exp( inputMids[di] ) / SoftmaxConstraint::sumOfExponential( inputMids ); + if ( i == di ) + val2 -= 1; + + return val * val2; +} + +double Layer::ERLowerBound( const Vector &inputs, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) +{ + Vector inputTilda; + SoftmaxConstraint::xTilda( inputs, inputs[i], inputTilda ); + + double sum = 0; + for ( unsigned j = 0; j < inputs.size(); ++j ) + { + if ( i == j ) + sum += 1; + else + { + double ljTilda = inputLbs[j] - inputUbs[i]; + double ujTilda = inputUbs[j] - inputLbs[i]; + double xjTilda = inputTilda[j]; + + sum += ( ujTilda - xjTilda ) / ( ujTilda - ljTilda ) * std::exp( ljTilda ) + + ( xjTilda - ljTilda ) / ( ujTilda - ljTilda ) * std::exp( ujTilda ); + } + } + + return 1 / sum; +} + +double Layer::dERLowerBound( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) +{ + double val = ERLowerBound( inputMids, inputLbs, inputUbs, i ); + + if ( i != di ) + { + double ldiTilda = inputLbs[di] - inputUbs[i]; + double udiTilda = inputUbs[di] - inputLbs[i]; + return -val * val * ( std::exp( udiTilda ) - std::exp( ldiTilda ) ) / + ( udiTilda - ldiTilda ); + } + else + { + double val2 = 0; + for ( unsigned j = 0; j < inputMids.size(); ++j ) + { + if ( j != i ) + { + double ljTilda = inputLbs[j] - inputUbs[i]; + double ujTilda = inputUbs[j] - inputLbs[i]; + val2 += ( std::exp( ujTilda ) - std::exp( ljTilda ) ) / ( ujTilda - ljTilda ); + } + } + return val * val * val2; + } +} + +double Layer::ERUpperBound( const Vector &inputs, + const Vector &outputLb, + const Vector &outputUb, + unsigned i ) +{ + double li = outputLb[i]; + double ui = outputUb[i]; + + Vector inputTilda; + SoftmaxConstraint::xTilda( inputs, inputs[i], inputTilda ); + + return ui + li - ui * li * SoftmaxConstraint::sumOfExponential( inputTilda ); +} + +double Layer::dERUpperBound( 
const Vector &inputMids, + const Vector &outputLb, + const Vector &outputUb, + unsigned i, + unsigned di ) +{ + double li = outputLb[i]; + double ui = outputUb[i]; + + + if ( i == di ) + { + double val2 = -1; + for ( unsigned j = 0; j < inputMids.size(); ++j ) + val2 += std::exp( inputMids[j] - inputMids[i] ); + return li * ui * val2; + } + else + return -li * ui * std::exp( inputMids[di] - inputMids[i] ); +} + +double Layer::linearLowerBound( const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) +{ + Vector uTilda; + SoftmaxConstraint::xTilda( inputUbs, inputLbs[i], uTilda ); + uTilda[i] = 0; + return 1 / SoftmaxConstraint::sumOfExponential( uTilda ); +} + +double Layer::linearUpperBound( const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) +{ + Vector lTilda; + SoftmaxConstraint::xTilda( inputLbs, inputUbs[i], lTilda ); + lTilda[i] = 0; + return 1 / SoftmaxConstraint::sumOfExponential( lTilda ); +} + +void Layer::eliminateVariable( unsigned variable, double value ) +{ + if ( !_variableToNeuron.exists( variable ) ) + return; + + ASSERT( _type != INPUT ); + + unsigned neuron = _variableToNeuron[variable]; + _eliminatedNeurons[neuron] = value; + _lb[neuron] = value; + _ub[neuron] = value; + _neuronToVariable.erase( _variableToNeuron[variable] ); + _variableToNeuron.erase( variable ); +} + +void Layer::updateVariableIndices( const Map &oldIndexToNewIndex, + const Map &mergedVariables ) +{ + // First, do a pass to handle any merged variables + auto neuronIt = _neuronToVariable.begin(); + while ( neuronIt != _neuronToVariable.end() ) + { + unsigned variable = neuronIt->second; + while ( mergedVariables.exists( variable ) ) + { + neuronIt->second = mergedVariables[variable]; + variable = neuronIt->second; + } + + ++neuronIt; + } + + // Now handle re-indexing + neuronIt = _neuronToVariable.begin(); + while ( neuronIt != _neuronToVariable.end() ) + { + unsigned variable = neuronIt->second; + + if ( !oldIndexToNewIndex.exists( variable ) ) + { + // This variable has been eliminated, remove from map + neuronIt = _neuronToVariable.erase( neuronIt ); + } + else + { + if ( oldIndexToNewIndex[variable] == variable ) + { + // Index hasn't changed, skip + } + else + { + // Index has changed + neuronIt->second = oldIndexToNewIndex[variable]; + } + + ++neuronIt; + continue; + } + } +} + +unsigned Layer::getLayerIndex() const +{ + return _layerIndex; +} + +Layer::Layer( const Layer *other ) + : _bias( NULL ) + , _assignment( NULL ) + , _lb( NULL ) + , _ub( NULL ) + , _inputLayerSize( 0 ) + , _symbolicLb( NULL ) + , _symbolicUb( NULL ) + , _symbolicLowerBias( NULL ) + , _symbolicUpperBias( NULL ) + , _symbolicLbOfLb( NULL ) + , _symbolicUbOfLb( NULL ) + , _symbolicLbOfUb( NULL ) + , _symbolicUbOfUb( NULL ) +{ + _layerIndex = other->_layerIndex; + _type = other->_type; + _size = other->_size; + _layerOwner = other->_layerOwner; + _alpha = other->_alpha; + + allocateMemory(); + + for ( auto &sourceLayerEntry : other->_sourceLayers ) + { + addSourceLayer( sourceLayerEntry.first, sourceLayerEntry.second ); + + if ( other->_layerToWeights.exists( sourceLayerEntry.first ) ) + memcpy( _layerToWeights[sourceLayerEntry.first], + other->_layerToWeights[sourceLayerEntry.first], + sizeof( double ) * sourceLayerEntry.second * _size ); + + if ( other->_layerToPositiveWeights.exists( sourceLayerEntry.first ) ) + memcpy( _layerToPositiveWeights[sourceLayerEntry.first], + other->_layerToPositiveWeights[sourceLayerEntry.first], + sizeof( double ) * sourceLayerEntry.second * _size ); + + if ( 
other->_layerToNegativeWeights.exists( sourceLayerEntry.first ) ) + memcpy( _layerToNegativeWeights[sourceLayerEntry.first], + other->_layerToNegativeWeights[sourceLayerEntry.first], + sizeof( double ) * sourceLayerEntry.second * _size ); + } + + _successorLayers = other->_successorLayers; + + if ( other->_bias ) + memcpy( _bias, other->_bias, sizeof( double ) * _size ); + + _neuronToActivationSources = other->_neuronToActivationSources; + + _neuronToVariable = other->_neuronToVariable; + _variableToNeuron = other->_variableToNeuron; + _eliminatedNeurons = other->_eliminatedNeurons; + + _inputLayerSize = other->_inputLayerSize; +} + +const double *Layer::getSymbolicLb() const +{ + return _symbolicLb; +} + +const double *Layer::getSymbolicUb() const +{ + return _symbolicUb; +} + +const double *Layer::getSymbolicLowerBias() const +{ + return _symbolicLowerBias; +} + +const double *Layer::getSymbolicUpperBias() const +{ + return _symbolicUpperBias; +} + +double Layer::getSymbolicLbOfLb( unsigned neuron ) const +{ + return _symbolicLbOfLb[neuron]; +} + +double Layer::getSymbolicUbOfLb( unsigned neuron ) const +{ + return _symbolicUbOfLb[neuron]; +} + +double Layer::getSymbolicLbOfUb( unsigned neuron ) const +{ + return _symbolicLbOfUb[neuron]; +} + +double Layer::getSymbolicUbOfUb( unsigned neuron ) const +{ + return _symbolicUbOfUb[neuron]; +} + +void Layer::freeMemoryIfNeeded() +{ + for ( const auto &weights : _layerToWeights ) + delete[] weights.second; + _layerToWeights.clear(); + + for ( const auto &weights : _layerToPositiveWeights ) + delete[] weights.second; + _layerToPositiveWeights.clear(); + + for ( const auto &weights : _layerToNegativeWeights ) + delete[] weights.second; + _layerToNegativeWeights.clear(); + + if ( _bias ) + { + delete[] _bias; + _bias = NULL; + } + + if ( _assignment ) + { + delete[] _assignment; + _assignment = NULL; + } + + if ( _lb ) + { + delete[] _lb; + _lb = NULL; + } + + if ( _ub ) + { + delete[] _ub; + _ub = NULL; + } + + if ( _symbolicLb ) + { + delete[] _symbolicLb; + _symbolicLb = NULL; + } + + if ( _symbolicUb ) + { + delete[] _symbolicUb; + _symbolicUb = NULL; + } + + if ( _symbolicLowerBias ) + { + delete[] _symbolicLowerBias; + _symbolicLowerBias = NULL; + } + + if ( _symbolicUpperBias ) + { + delete[] _symbolicUpperBias; + _symbolicUpperBias = NULL; + } + + if ( _symbolicLbOfLb ) + { + delete[] _symbolicLbOfLb; + _symbolicLbOfLb = NULL; + } + + if ( _symbolicUbOfLb ) + { + delete[] _symbolicUbOfLb; + _symbolicUbOfLb = NULL; + } + + if ( _symbolicLbOfUb ) + { + delete[] _symbolicLbOfUb; + _symbolicLbOfUb = NULL; + } + + if ( _symbolicUbOfUb ) + { + delete[] _symbolicUbOfUb; + _symbolicUbOfUb = NULL; + } +} + +String Layer::typeToString( Type type ) +{ + switch ( type ) + { + case INPUT: + return "INPUT"; + break; + + case WEIGHTED_SUM: + return "WEIGHTED_SUM"; + break; + + case RELU: + return "RELU"; + break; + + case LEAKY_RELU: + return "LEAKY_RELU"; + break; + + case SIGMOID: + return "SIGMOID"; + break; + + case ABSOLUTE_VALUE: + return "ABSOLUTE_VALUE"; + break; + + case MAX: + return "MAX"; + break; + + case SIGN: + return "SIGN"; + break; + + case ROUND: + return "ROUND"; + break; + + case SOFTMAX: + return "SOFTMAX"; + break; + + case BILINEAR: + return "BILINEAR"; + break; + + default: + return "UNKNOWN TYPE"; + break; + } +} + +void Layer::dump() const +{ + printf( "\nDumping info for layer %u (%s):\n", _layerIndex, typeToString( _type ).ascii() ); + printf( "\tNeurons:\n" ); + + switch ( _type ) + { + case INPUT: + printf( "\t\t" ); + for 
( unsigned i = 0; i < _size; ++i ) + printf( "x%u ", _neuronToVariable[i] ); + + printf( "\n\n" ); + break; + + case WEIGHTED_SUM: + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + { + printf( "\t\tNeuron %u = %+.4lf\t[ELIMINATED]\n", i, _eliminatedNeurons[i] ); + continue; + } + + printf( "\t\tx%u = %+.4lf\n\t\t\t", _neuronToVariable[i], _bias[i] ); + for ( const auto &sourceLayerEntry : _sourceLayers ) + { + const Layer *sourceLayer = _layerOwner->getLayer( sourceLayerEntry.first ); + for ( unsigned j = 0; j < sourceLayer->getSize(); ++j ) + { + double weight = _layerToWeights[sourceLayerEntry.first][j * _size + i]; + if ( !FloatUtils::isZero( weight ) ) + { + if ( sourceLayer->_neuronToVariable.exists( j ) ) + printf( "%+.5lfx%u ", weight, sourceLayer->_neuronToVariable[j] ); + else + printf( "%+.5lf", weight * sourceLayer->_eliminatedNeurons[j] ); + } + } + } + printf( "\n" ); + } + + printf( "\n" ); + break; + + case RELU: + case ROUND: + case LEAKY_RELU: + case ABSOLUTE_VALUE: + case MAX: + case SIGN: + case SIGMOID: + case BILINEAR: + case SOFTMAX: + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + { + printf( "\t\tNeuron %u = %+.4lf\t[ELIMINATED]\n", i, _eliminatedNeurons[i] ); + continue; + } + + printf( "\t\tSources for x%u: ", _neuronToVariable[i] ); + for ( const auto &source : _neuronToActivationSources[i] ) + { + const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); + if ( sourceLayer->_neuronToVariable.exists( source._neuron ) ) + printf( "x%u ", sourceLayer->_neuronToVariable[source._neuron] ); + else + printf( "Neuron_%u (eliminated)", source._neuron ); + } + + printf( "\n" ); + } + + printf( "\n" ); + break; + } +} + +bool Layer::neuronHasVariable( unsigned neuron ) const +{ + ASSERT( _neuronToVariable.exists( neuron ) || _eliminatedNeurons.exists( neuron ) ); + return _neuronToVariable.exists( neuron ); +} + +unsigned Layer::neuronToVariable( unsigned neuron ) const +{ + DEBUG( { + if ( !_neuronToVariable.exists( neuron ) ) + ASSERT( _eliminatedNeurons.exists( neuron ) ); + } ) + + ASSERT( _neuronToVariable.exists( neuron ) ); + return _neuronToVariable[neuron]; +} + +unsigned Layer::variableToNeuron( unsigned variable ) const +{ + ASSERT( _variableToNeuron.exists( variable ) ); + return _variableToNeuron[variable]; +} + +bool Layer::neuronEliminated( unsigned neuron ) const +{ + return _eliminatedNeurons.exists( neuron ); +} + +double Layer::getEliminatedNeuronValue( unsigned neuron ) const +{ + ASSERT( _eliminatedNeurons.exists( neuron ) ); + return _eliminatedNeurons[neuron]; +} + +void Layer::reduceIndexFromAllMaps( unsigned startIndex ) +{ + // Adjust the source layers + Map copyOfSources = _sourceLayers; + _sourceLayers.clear(); + for ( const auto &pair : copyOfSources ) + _sourceLayers[pair.first >= startIndex ? pair.first - 1 : pair.first] = pair.second; + + // Adjust all weight maps + adjustWeightMapIndexing( _layerToWeights, startIndex ); + adjustWeightMapIndexing( _layerToPositiveWeights, startIndex ); + adjustWeightMapIndexing( _layerToNegativeWeights, startIndex ); + + // Adjust the neuron activations + for ( auto &neuronToSources : _neuronToActivationSources ) + { + for ( auto &source : neuronToSources.second ) + { + if ( source._layer >= startIndex ) + --source._layer; + } + } +} + +void Layer::adjustWeightMapIndexing( Map &map, unsigned startIndex ) +{ + Map copyOfWeights = map; + map.clear(); + for ( const auto &pair : copyOfWeights ) + map[pair.first >= startIndex ? 
pair.first - 1 : pair.first] = pair.second; +} + +void Layer::reduceIndexAfterMerge( unsigned startIndex ) +{ + if ( _layerIndex >= startIndex ) + --_layerIndex; +} + +bool Layer::operator==( const Layer &layer ) const +{ + if ( _layerIndex != layer._layerIndex ) + return false; + + if ( _type != layer._type ) + return false; + + if ( _size != layer._size ) + return false; + + if ( _inputLayerSize != layer._inputLayerSize ) + return false; + + if ( ( _bias && !layer._bias ) || ( !_bias && layer._bias ) ) + return false; + + if ( _bias && layer._bias ) + { + if ( std::memcmp( _bias, layer._bias, _size * sizeof( double ) ) != 0 ) + return false; + } + + if ( _sourceLayers != layer._sourceLayers ) + return false; + + if ( !compareWeights( _layerToWeights, layer._layerToWeights ) ) + return false; + + if ( !compareWeights( _layerToPositiveWeights, layer._layerToPositiveWeights ) ) + return false; + + if ( !compareWeights( _layerToNegativeWeights, layer._layerToNegativeWeights ) ) + return false; + + return true; +} + +bool Layer::compareWeights( const Map &map, + const Map &mapOfOtherLayer ) const +{ + if ( map.size() != mapOfOtherLayer.size() ) + return false; + + for ( const auto &pair : map ) + { + unsigned key = pair.first; + double *value = pair.second; + + if ( !mapOfOtherLayer.exists( key ) ) + return false; + + if ( std::memcmp( + value, mapOfOtherLayer[key], _size * _sourceLayers[key] * sizeof( double ) ) != 0 ) + { + return false; + } + } + + return true; +} + +unsigned Layer::getMaxVariable() const +{ + unsigned result = 0; + for ( const auto &pair : _neuronToVariable ) + if ( pair.second > result ) + result = pair.second; + + return result; +} + +void Layer::dumpBounds() const +{ + printf( "Layer %u:\n", _layerIndex ); + for ( unsigned i = 0; i < _size; ++i ) + { + printf( "\tNeuron%u\tLB: %.4f, UB: %.4f \n", i + 1, _lb[i], _ub[i] ); + } + printf( "\n" ); +} + +} // namespace NLR From a9d0930e961c9587998c06a03c820f1cab1a8082 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Wed, 17 Sep 2025 21:11:38 +0300 Subject: [PATCH 74/81] Add files via upload --- src/nlr/LPFormulator.cpp | 195 +++++++++----- src/nlr/Layer.cpp | 563 ++++++++++++--------------------------- 2 files changed, 300 insertions(+), 458 deletions(-) diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp index 84447306f9..67f2725155 100644 --- a/src/nlr/LPFormulator.cpp +++ b/src/nlr/LPFormulator.cpp @@ -248,9 +248,11 @@ void LPFormulator::optimizeBoundsWithIncrementalLpRelaxation( const Map &layers, - bool backward, - const Vector &coeffs ) +void LPFormulator::optimizeBoundsWithLpRelaxation( + const Map &layers, + bool backward, + const Map> &layerIndicesToParameters, + const Vector &polygonalTightenings ) { unsigned numberOfWorkers = Options::get()->getInt( Options::NUM_WORKERS ); @@ -303,7 +305,8 @@ void LPFormulator::optimizeBoundsWithLpRelaxation( const Map &solverToIndex ); // optimize every neuron of layer - optimizeBoundsOfNeuronsWithLpRelaxation( argument, backward, coeffs ); + optimizeBoundsOfNeuronsWithLpRelaxation( + argument, backward, layerIndicesToParameters, polygonalTightenings ); LPFormulator_LOG( Stringf( "Tightening bound for layer %u - done", layerIndex ).ascii() ); } @@ -337,13 +340,6 @@ void LPFormulator::optimizeBoundsWithLpRelaxation( const Map throw InfeasibleQueryException(); } -void LPFormulator::optimizeBoundsWithPreimageApproximation( Map &layers ) -{ - const Vector &optimal_coeffs = layers[0]->OptimalParameterisedSymbolicBoundTightening(); - optimizeBoundsWithLpRelaxation( layers, 
false, optimal_coeffs ); - optimizeBoundsWithLpRelaxation( layers, true, optimal_coeffs ); -} - void LPFormulator::optimizeBoundsOfOneLayerWithLpRelaxation( const Map &layers, unsigned targetIndex ) { @@ -417,9 +413,11 @@ void LPFormulator::optimizeBoundsOfOneLayerWithLpRelaxation( const Map &coeffs ) +void LPFormulator::optimizeBoundsOfNeuronsWithLpRelaxation( + ThreadArgument &args, + bool backward, + const Map> &layerIndicesToParameters, + const Vector &polygonalTightenings ) { unsigned numberOfWorkers = Options::get()->getInt( Options::NUM_WORKERS ); @@ -532,9 +530,17 @@ void LPFormulator::optimizeBoundsOfNeuronsWithLpRelaxation( ThreadArgument &args mtx.lock(); if ( backward ) - createLPRelaxationAfter( layers, *freeSolver, lastIndexOfRelaxation, coeffs ); + createLPRelaxationAfter( layers, + *freeSolver, + lastIndexOfRelaxation, + layerIndicesToParameters, + polygonalTightenings ); else - createLPRelaxation( layers, *freeSolver, lastIndexOfRelaxation, coeffs ); + createLPRelaxation( layers, + *freeSolver, + lastIndexOfRelaxation, + layerIndicesToParameters, + polygonalTightenings ); mtx.unlock(); // spawn a thread to tighten the bounds for the current variable @@ -654,10 +660,12 @@ void LPFormulator::tightenSingleVariableBoundsWithLPRelaxation( ThreadArgument & } } -void LPFormulator::createLPRelaxation( const Map &layers, - GurobiWrapper &gurobi, - unsigned lastLayer, - const Vector &coeffs ) +void LPFormulator::createLPRelaxation( + const Map &layers, + GurobiWrapper &gurobi, + unsigned lastLayer, + const Map> &layerIndicesToParameters, + const Vector &polygonalTightenings ) { for ( const auto &layer : layers ) { @@ -665,22 +673,23 @@ void LPFormulator::createLPRelaxation( const Map &layers, if ( currentLayerIndex > lastLayer ) continue; - if ( coeffs.empty() ) + if ( layerIndicesToParameters.empty() ) addLayerToModel( gurobi, layer.second, false ); else { - Map> layerIndicesToParameters = - layer.second->getParametersForLayers( layers, coeffs ); const Vector ¤tLayerCoeffs = layerIndicesToParameters[currentLayerIndex]; addLayerToParameterisedModel( gurobi, layer.second, false, currentLayerCoeffs ); } } + addPolyognalTighteningsToLpRelaxation( gurobi, layers, 0, lastLayer, polygonalTightenings ); } -void LPFormulator::createLPRelaxationAfter( const Map &layers, - GurobiWrapper &gurobi, - unsigned firstLayer, - const Vector &coeffs ) +void LPFormulator::createLPRelaxationAfter( + const Map &layers, + GurobiWrapper &gurobi, + unsigned firstLayer, + const Map> &layerIndicesToParameters, + const Vector &polygonalTightenings ) { unsigned depth = GlobalConfiguration::BACKWARD_BOUND_PROPAGATION_DEPTH; std::priority_queue, std::greater> layersToAdd; @@ -698,12 +707,10 @@ void LPFormulator::createLPRelaxationAfter( const Map &layers continue; else { - if ( coeffs.empty() ) + if ( layerIndicesToParameters.empty() ) addLayerToModel( gurobi, currentLayer, true ); else { - Map> layerIndicesToParameters = - currentLayer->getParametersForLayers( layers, coeffs ); const Vector ¤tLayerCoeffs = layerIndicesToParameters[currentLayerIndex]; addLayerToParameterisedModel( gurobi, currentLayer, true, currentLayerCoeffs ); @@ -718,6 +725,8 @@ void LPFormulator::createLPRelaxationAfter( const Map &layers } } } + addPolyognalTighteningsToLpRelaxation( + gurobi, layers, firstLayer, layersToAdd.top(), polygonalTightenings ); } void LPFormulator::addLayerToModel( GurobiWrapper &gurobi, @@ -1338,11 +1347,9 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, } double ub = - std::min( 
DeepPolySoftmaxElement::linearUpperBound( sourceLbs, sourceUbs, index ), - layer->getUb( i ) ); + std::min( Layer::linearUpperBound( sourceLbs, sourceUbs, index ), layer->getUb( i ) ); double lb = - std::max( DeepPolySoftmaxElement::linearLowerBound( sourceLbs, sourceUbs, index ), - layer->getLb( i ) ); + std::max( Layer::linearLowerBound( sourceLbs, sourceUbs, index ), layer->getLb( i ) ); targetLbs[index] = lb; targetUbs[index] = ub; @@ -1374,14 +1381,13 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, { terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = DeepPolySoftmaxElement::LSELowerBound( - sourceMids, sourceLbs, sourceUbs, index ); + bias = Layer::LSELowerBound( sourceMids, sourceLbs, sourceUbs, index ); for ( const auto &source : sources ) { const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); unsigned sourceNeuron = source._neuron; unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dldj = DeepPolySoftmaxElement::dLSELowerBound( + double dldj = Layer::dLSELowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); @@ -1394,14 +1400,13 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, { terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = DeepPolySoftmaxElement::LSELowerBound2( - sourceMids, sourceLbs, sourceUbs, index ); + bias = Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, index ); for ( const auto &source : sources ) { const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); unsigned sourceNeuron = source._neuron; unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dldj = DeepPolySoftmaxElement::dLSELowerBound2( + double dldj = Layer::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); @@ -1413,15 +1418,14 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = DeepPolySoftmaxElement::LSEUpperBound( - sourceMids, targetLbs, targetUbs, index ); + bias = Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, index ); inputIndex = 0; for ( const auto &source : sources ) { const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); unsigned sourceNeuron = source._neuron; unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dudj = DeepPolySoftmaxElement::dLSEUpperbound( + double dudj = Layer::dLSEUpperbound( sourceMids, targetLbs, targetUbs, index, inputIndex ); terms.append( GurobiWrapper::Term( -dudj, Stringf( "x%u", sourceVariable ) ) ); bias -= dudj * sourceMids[inputIndex]; @@ -1433,16 +1437,15 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, { terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = - DeepPolySoftmaxElement::ERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); + bias = Layer::ERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); unsigned inputIndex = 0; for ( const auto &source : sources ) { const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); unsigned sourceNeuron = source._neuron; unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dldj = 
DeepPolySoftmaxElement::dERLowerBound( - sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + double dldj = + Layer::dERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); bias -= dldj * sourceMids[inputIndex]; ++inputIndex; @@ -1451,16 +1454,15 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = - DeepPolySoftmaxElement::ERUpperBound( sourceMids, targetLbs, targetUbs, index ); + bias = Layer::ERUpperBound( sourceMids, targetLbs, targetUbs, index ); inputIndex = 0; for ( const auto &source : sources ) { const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); unsigned sourceNeuron = source._neuron; unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dudj = DeepPolySoftmaxElement::dERUpperBound( - sourceMids, targetLbs, targetUbs, index, inputIndex ); + double dudj = + Layer::dERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex ); terms.append( GurobiWrapper::Term( -dudj, Stringf( "x%u", sourceVariable ) ) ); bias -= dudj * sourceMids[inputIndex]; ++inputIndex; @@ -1483,21 +1485,20 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, List sources = layer->getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + Vector sourceLbs; Vector sourceUbs; Vector sourceValues; Vector sourceNeurons; - Vector sourceLayers; bool allConstant = true; for ( const auto &sourceIndex : sources ) { - const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); String sourceName = Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeuron ) ); - sourceLayers.append( sourceLayer ); sourceNeurons.append( sourceNeuron ); sourceLbs.append( sourceLb ); sourceUbs.append( sourceUb ); @@ -1546,10 +1547,10 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( -sourceLbs[1], - Stringf( "x%u", sourceLayers[0]->neuronToVariable( sourceNeurons[0] ) ) ) ); + Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); terms.append( GurobiWrapper::Term( -sourceLbs[0], - Stringf( "x%u", sourceLayers[1]->neuronToVariable( sourceNeurons[1] ) ) ) ); + Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); gurobi.addGeqConstraint( terms, -sourceLbs[0] * sourceLbs[1] ); // Upper bound: out <= u_y * x + l_x * y - l_x * u_y @@ -1557,10 +1558,10 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( -sourceUbs[1], - Stringf( "x%u", sourceLayers[0]->neuronToVariable( sourceNeurons[0] ) ) ) ); + Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); terms.append( GurobiWrapper::Term( -sourceLbs[0], - Stringf( "x%u", sourceLayers[1]->neuronToVariable( sourceNeurons[1] ) ) ) ); + Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); gurobi.addLeqConstraint( terms, -sourceLbs[0] * sourceUbs[1] ); } } @@ -2049,21 +2050,20 @@ void LPFormulator::addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &g 
List sources = layer->getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + Vector sourceLbs; Vector sourceUbs; Vector sourceValues; Vector sourceNeurons; - Vector sourceLayers; bool allConstant = true; for ( const auto &sourceIndex : sources ) { - const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); String sourceName = Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeuron ) ); - sourceLayers.append( sourceLayer ); sourceNeurons.append( sourceNeuron ); sourceLbs.append( sourceLb ); sourceUbs.append( sourceUb ); @@ -2115,17 +2115,17 @@ void LPFormulator::addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &g // Upper bound: out <= a_u * x + b_u * y + c_u, where // a_u = alpha2 * u_y + ( 1 - alpha2 ) * l_y // b_u = alpha2 * l_x + ( 1 - alpha2 ) * u_x - // c_u = -alpha2 * l_x * u_y - ( 1 - alpha2 ) * u_x * l_y + // c_u = -alpha2 * -l_x * u_y - ( 1 - alpha2 ) * u_x * l_y List terms; terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( -coeffs[0] * sourceLbs[1] - ( 1 - coeffs[0] ) * sourceUbs[1], - Stringf( "x%u", sourceLayers[0]->neuronToVariable( sourceNeurons[0] ) ) ) ); + Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); terms.append( GurobiWrapper::Term( -coeffs[0] * sourceLbs[0] - ( 1 - coeffs[0] ) * sourceUbs[0], - Stringf( "x%u", sourceLayers[1]->neuronToVariable( sourceNeurons[1] ) ) ) ); + Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); gurobi.addGeqConstraint( terms, -coeffs[0] * sourceLbs[0] * sourceLbs[1] - ( 1 - coeffs[0] ) * sourceUbs[0] * sourceUbs[1] ); @@ -2134,10 +2134,10 @@ void LPFormulator::addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &g terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( -coeffs[1] * sourceUbs[1] - ( 1 - coeffs[1] ) * sourceLbs[1], - Stringf( "x%u", sourceLayers[0]->neuronToVariable( sourceNeurons[0] ) ) ) ); + Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); terms.append( GurobiWrapper::Term( -coeffs[1] * sourceLbs[0] - ( 1 - coeffs[1] ) * sourceUbs[0], - Stringf( "x%u", sourceLayers[1]->neuronToVariable( sourceNeurons[1] ) ) ) ); + Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); gurobi.addLeqConstraint( terms, -coeffs[1] * sourceLbs[0] * sourceUbs[1] - ( 1 - coeffs[1] ) * sourceUbs[0] * sourceLbs[1] ); @@ -2145,6 +2145,69 @@ void LPFormulator::addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &g } } +void LPFormulator::addPolyognalTighteningsToLpRelaxation( + GurobiWrapper &gurobi, + const Map &layers, + unsigned firstLayer, + unsigned lastLayer, + const Vector &polygonalTightenings ) +{ + List terms; + for ( const auto &tightening : polygonalTightenings ) + { + Map neuronToCoefficient = tightening._neuronToCoefficient; + PolygonalTightening::PolygonalBoundType type = tightening._type; + double value = tightening._value; + + bool outOfBounds = false; + for ( const auto &pair : neuronToCoefficient ) + { + unsigned currentLayerIndex = pair.first._layer; + if ( currentLayerIndex < firstLayer || currentLayerIndex > lastLayer ) + { + outOfBounds = true; + } + } + if ( outOfBounds ) + { + continue; + } + + terms.clear(); + for ( const auto &pair : neuronToCoefficient ) + { + 
unsigned currentLayerIndex = pair.first._layer; + unsigned i = pair.first._neuron; + double coeff = pair.second; + Layer *layer = layers[currentLayerIndex]; + + if ( !layer->neuronEliminated( i ) ) + { + unsigned variable = layer->neuronToVariable( i ); + String variableName = Stringf( "x%u", variable ); + if ( !gurobi.containsVariable( variableName ) ) + { + gurobi.addVariable( variableName, layer->getLb( i ), layer->getUb( i ) ); + } + terms.append( GurobiWrapper::Term( coeff, Stringf( "x%u", variable ) ) ); + } + else + { + value -= coeff * layer->getEliminatedNeuronValue( i ); + } + } + + if ( type == PolygonalTightening::UB ) + { + gurobi.addLeqConstraint( terms, value ); + } + else + { + gurobi.addGeqConstraint( terms, value ); + } + } +} + void LPFormulator::setCutoff( double cutoff ) { _cutoffInUse = true; diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index d4ae7af8a2..43f24e341b 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -77,7 +77,15 @@ void Layer::allocateMemory() if ( Options::get()->getSymbolicBoundTighteningType() == SymbolicBoundTighteningType::SYMBOLIC_BOUND_TIGHTENING || Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX ) + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX || + Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP || + Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM || + Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT || + Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS ) { _symbolicLb = new double[_size * _inputLayerSize]; _symbolicUb = new double[_size * _inputLayerSize]; @@ -928,23 +936,23 @@ void Layer::computeIntervalArithmeticBoundsForSign() double lb = sourceLayer->getLb( sourceIndex._neuron ); double ub = sourceLayer->getUb( sourceIndex._neuron ); - double new_lb; - double new_ub; + double newLb; + double newUb; if ( !FloatUtils::isNegative( lb ) ) { - new_lb = 1; - new_ub = 1; + newLb = 1; + newUb = 1; } else if ( FloatUtils::isNegative( ub ) ) { - new_lb = -1; - new_ub = -1; + newLb = -1; + newUb = -1; } else { - new_lb = -1; - new_ub = 1; + newLb = -1; + newUb = 1; } /* @@ -952,16 +960,16 @@ void Layer::computeIntervalArithmeticBoundsForSign() variable. If they are tigheter than what was previously known, store them. 
*/ - if ( _lb[i] < new_lb ) + if ( _lb[i] < newLb ) { - _lb[i] = new_lb; + _lb[i] = newLb; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); } - if ( _ub[i] > new_ub ) + if ( _ub[i] > newUb ) { - _ub[i] = new_ub; + _ub[i] = newUb; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); } @@ -1225,8 +1233,8 @@ void Layer::computeIntervalArithmeticBoundsForSoftmax() } } - double lb = softmaxLinearLowerBound( sourceLbs, sourceUbs, index ); - double ub = softmaxLinearUpperBound( sourceLbs, sourceUbs, index ); + double lb = linearLowerBound( sourceLbs, sourceUbs, index ); + double ub = linearUpperBound( sourceLbs, sourceUbs, index ); if ( _lb[i] < lb ) { _lb[i] = lb; @@ -2704,6 +2712,7 @@ void Layer::computeSymbolicBoundsForSoftmax() Vector sourceMids; Vector targetLbs; Vector targetUbs; + unsigned len = 0; for ( const auto &sourceIndex : sources ) { unsigned sourceNeuron = sourceIndex._neuron; @@ -2715,6 +2724,8 @@ void Layer::computeSymbolicBoundsForSoftmax() sourceMids.append( ( sourceLb + sourceUb ) / 2 ); targetLbs.append( _lb[i] ); targetUbs.append( _ub[i] ); + + ++len; } // Find the index of i in the softmax @@ -2730,8 +2741,8 @@ void Layer::computeSymbolicBoundsForSoftmax() } } - double lb = softmaxLinearLowerBound( sourceLbs, sourceUbs, index ); - double ub = softmaxLinearUpperBound( sourceLbs, sourceUbs, index ); + double lb = linearLowerBound( sourceLbs, sourceUbs, index ); + double ub = linearUpperBound( sourceLbs, sourceUbs, index ); if ( _lb[i] < lb ) { _lb[i] = lb; @@ -2753,8 +2764,8 @@ void Layer::computeSymbolicBoundsForSoftmax() _symbolicUpperBias[i] = _ub[i]; for ( const auto &sourceIndex : sources ) { - symbolicLb[_size * sourceIndex._neuron + i] = 0; - symbolicUb[_size * sourceIndex._neuron + i] = 0; + symbolicLb[len * sourceIndex._neuron + i] = 0; + symbolicUb[len * sourceIndex._neuron + i] = 0; } } else @@ -2772,12 +2783,12 @@ void Layer::computeSymbolicBoundsForSoftmax() if ( !useLSE2 ) { _symbolicLowerBias[i] = - softmaxLSELowerBound( sourceMids, sourceLbs, sourceUbs, index ); + LSELowerBound( sourceMids, sourceLbs, sourceUbs, index ); for ( const auto &sourceIndex : sources ) { - double dldj = softmaxdLSELowerBound( - sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - symbolicLb[_size * sourceIndex._neuron + i] = dldj; + double dldj = + dLSELowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + symbolicLb[len * sourceIndex._neuron + i] = dldj; _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; ++inputIndex; } @@ -2785,51 +2796,48 @@ void Layer::computeSymbolicBoundsForSoftmax() else { _symbolicLowerBias[i] = - softmaxLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index ); + LSELowerBound2( sourceMids, sourceLbs, sourceUbs, index ); for ( const auto &sourceIndex : sources ) { - double dldj = softmaxdLSELowerBound2( - sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - symbolicLb[_size * sourceIndex._neuron + i] = dldj; + double dldj = + dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + symbolicLb[len * sourceIndex._neuron + i] = dldj; _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; ++inputIndex; } } - _symbolicUpperBias[i] = - softmaxLSEUpperBound( sourceMids, targetLbs, targetUbs, index ); + _symbolicUpperBias[i] = LSEUpperBound( sourceMids, targetLbs, targetUbs, index ); inputIndex = 0; for ( const auto &sourceIndex : sources ) { - double dudj = softmaxdLSEUpperbound( - sourceMids, targetLbs, targetUbs, index, 
inputIndex ); - symbolicUb[_size * sourceIndex._neuron + i] = dudj; + double dudj = + dLSEUpperbound( sourceMids, targetLbs, targetUbs, index, inputIndex ); + symbolicUb[len * sourceIndex._neuron + i] = dudj; _symbolicUpperBias[i] -= dudj * sourceMids[inputIndex]; ++inputIndex; } } else if ( boundType == SoftmaxBoundType::EXPONENTIAL_RECIPROCAL_DECOMPOSITION ) { - _symbolicLowerBias[i] = - softmaxERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); + _symbolicLowerBias[i] = ERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); unsigned inputIndex = 0; for ( const auto &sourceIndex : sources ) { double dldj = - softmaxdERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - symbolicLb[_size * sourceIndex._neuron + i] = dldj; + dERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + symbolicLb[len * sourceIndex._neuron + i] = dldj; _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; ++inputIndex; } - _symbolicUpperBias[i] = - softmaxERUpperBound( sourceMids, targetLbs, targetUbs, index ); + _symbolicUpperBias[i] = ERUpperBound( sourceMids, targetLbs, targetUbs, index ); inputIndex = 0; for ( const auto &sourceIndex : sources ) { double dudj = - softmaxdERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex ); - symbolicUb[_size * sourceIndex._neuron + i] = dudj; + dERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex ); + symbolicUb[len * sourceIndex._neuron + i] = dudj; _symbolicUpperBias[i] -= dudj * sourceMids[inputIndex]; ++inputIndex; } @@ -3031,26 +3039,27 @@ void Layer::computeSymbolicBoundsForBilinear() List sources = getActivationSources( i ); ASSERT( sources.size() == 2 ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + + unsigned sourceLayerSize = sourceLayer->getSize(); + const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); + const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); + Vector sourceLbs; Vector sourceUbs; Vector sourceValues; - Vector sourceNeurons; - Vector sourceLayerSizes; - Vector sourceLayers; bool allConstant = true; + unsigned indexA = 0; + unsigned indexB = 0; + unsigned counter = 0; for ( const auto &sourceIndex : sources ) { - const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); - unsigned sourceLayerSize = sourceLayer->getSize(); - sourceLayers.append( sourceLayer ); - sourceNeurons.append( sourceNeuron ); sourceLbs.append( sourceLb ); sourceUbs.append( sourceUb ); - sourceLayerSizes.append( sourceLayerSize ); if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) { @@ -3061,6 +3070,16 @@ void Layer::computeSymbolicBoundsForBilinear() double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); sourceValues.append( sourceValue ); } + + if ( counter == 0 ) + { + indexA = sourceIndex._neuron; + } + else + { + indexB = sourceIndex._neuron; + } + ++counter; } if ( allConstant ) @@ -3090,75 +3109,47 @@ void Layer::computeSymbolicBoundsForBilinear() // Symbolic upper bound: // out <= alpha * x + beta * y + gamma // where alpha = ub_y, beta = lb_x, gamma = -lb_x * ub_y - double aLower = sourceLbs[1]; - double aUpper = sourceUbs[1]; - double bLower = sourceLbs[0]; - double bUpper = sourceLbs[0]; - _symbolicLowerBias[i] = -sourceLbs[0] * sourceLbs[1]; - _symbolicUpperBias[i] = -sourceLbs[0] * sourceUbs[1]; - for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - if ( 
aLower >= 0 ) + if ( sourceLbs[1] >= 0 ) { _symbolicLb[j * _size + i] += - aLower * ( sourceLayers[0] - ->getSymbolicLb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; - _symbolicLowerBias[i] += aLower * ( sourceLayers[0]->getSymbolicLowerBias() )[0]; + sourceLbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA]; } else { _symbolicLb[j * _size + i] += - aLower * ( sourceLayers[0] - ->getSymbolicUb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; - _symbolicLowerBias[i] += aLower * ( sourceLayers[0]->getSymbolicUpperBias() )[0]; + sourceLbs[1] * sourceSymbolicUb[j * sourceLayerSize + indexA]; } - if ( aUpper >= 0 ) + if ( sourceUbs[1] >= 0 ) { _symbolicUb[j * _size + i] += - aUpper * ( sourceLayers[0] - ->getSymbolicUb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; - _symbolicUpperBias[i] += aUpper * ( sourceLayers[0]->getSymbolicUpperBias() )[0]; + sourceUbs[1] * sourceSymbolicUb[j * sourceLayerSize + indexA]; } else { _symbolicUb[j * _size + i] += - aUpper * ( sourceLayers[0] - ->getSymbolicLb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; - _symbolicUpperBias[i] += aUpper * ( sourceLayers[0]->getSymbolicLowerBias() )[0]; + sourceUbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA]; } - if ( bLower >= 0 ) - { - _symbolicLb[j * _size + i] += - bLower * ( sourceLayers[1] - ->getSymbolicLb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; - _symbolicUpperBias[i] += bLower * ( sourceLayers[1]->getSymbolicLowerBias() )[1]; - } - else + if ( sourceLbs[0] >= 0 ) { _symbolicLb[j * _size + i] += - bLower * ( sourceLayers[1] - ->getSymbolicUb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; - _symbolicUpperBias[i] += bLower * ( sourceLayers[1]->getSymbolicUpperBias() )[1]; - } - - if ( bUpper >= 0 ) - { + sourceLbs[0] * sourceSymbolicLb[j * sourceLayerSize + indexB]; _symbolicUb[j * _size + i] += - bUpper * ( sourceLayers[1] - ->getSymbolicUb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; - _symbolicUpperBias[i] += bUpper * ( sourceLayers[1]->getSymbolicUpperBias() )[1]; + sourceLbs[0] * sourceSymbolicUb[j * sourceLayerSize + indexB]; } else { + _symbolicLb[j * _size + i] += + sourceLbs[0] * sourceSymbolicUb[j * sourceLayerSize + indexB]; _symbolicUb[j * _size + i] += - bUpper * ( sourceLayers[1] - ->getSymbolicLb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; - _symbolicUpperBias[i] += bUpper * ( sourceLayers[1]->getSymbolicLowerBias() )[1]; + sourceLbs[0] * sourceSymbolicLb[j * sourceLayerSize + indexB]; } } + _symbolicLowerBias[i] = -sourceLbs[0] * sourceLbs[1]; + _symbolicUpperBias[i] = -sourceLbs[0] * sourceUbs[1]; double lb = FloatUtils::infinity(); double ub = FloatUtils::negativeInfinity(); @@ -4105,26 +4096,27 @@ void Layer::computeParameterisedSymbolicBoundsForBilinear( const Vector List sources = getActivationSources( i ); ASSERT( sources.size() == 2 ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + + unsigned sourceLayerSize = sourceLayer->getSize(); + const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); + const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); + Vector sourceLbs; Vector sourceUbs; Vector sourceValues; - Vector sourceNeurons; - Vector sourceLayerSizes; - Vector sourceLayers; bool allConstant = true; + unsigned indexA = 0; + unsigned indexB = 0; + unsigned counter = 0; for ( const auto &sourceIndex : sources ) { - const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = 
sourceLayer->getUb( sourceNeuron ); - unsigned sourceLayerSize = sourceLayer->getSize(); - sourceLayers.append( sourceLayer ); - sourceNeurons.append( sourceNeuron ); sourceLbs.append( sourceLb ); sourceUbs.append( sourceUb ); - sourceLayerSizes.append( sourceLayerSize ); if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) { @@ -4135,6 +4127,16 @@ void Layer::computeParameterisedSymbolicBoundsForBilinear( const Vector double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); sourceValues.append( sourceValue ); } + + if ( counter == 0 ) + { + indexA = sourceIndex._neuron; + } + else + { + indexB = sourceIndex._neuron; + } + ++counter; } if ( allConstant ) @@ -4173,74 +4175,58 @@ void Layer::computeParameterisedSymbolicBoundsForBilinear( const Vector double bLower = coeffs[0] * sourceLbs[0] + ( 1 - coeffs[0] ) * sourceUbs[0]; double bUpper = coeffs[1] * sourceLbs[0] + ( 1 - coeffs[1] ) * sourceUbs[0]; - _symbolicLowerBias[i] = -coeffs[0] * sourceLbs[0] * sourceLbs[1] - - ( 1 - coeffs[0] ) * sourceUbs[0] * sourceUbs[1]; - _symbolicUpperBias[i] = -coeffs[1] * sourceLbs[0] * sourceUbs[1] - - ( 1 - coeffs[1] ) * sourceUbs[0] * sourceLbs[1]; - for ( unsigned j = 0; j < _inputLayerSize; ++j ) { if ( aLower >= 0 ) { _symbolicLb[j * _size + i] += - aLower * ( sourceLayers[0] - ->getSymbolicLb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; - _symbolicLowerBias[i] += aLower * ( sourceLayers[0]->getSymbolicLowerBias() )[0]; + aLower * sourceSymbolicLb[j * sourceLayerSize + indexA]; } else { _symbolicLb[j * _size + i] += - aLower * ( sourceLayers[0] - ->getSymbolicUb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; - _symbolicLowerBias[i] += aLower * ( sourceLayers[0]->getSymbolicUpperBias() )[0]; + aLower * sourceSymbolicUb[j * sourceLayerSize + indexA]; } if ( aUpper >= 0 ) { _symbolicUb[j * _size + i] += - aUpper * ( sourceLayers[0] - ->getSymbolicUb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; - _symbolicUpperBias[i] += aUpper * ( sourceLayers[0]->getSymbolicUpperBias() )[0]; + aUpper * sourceSymbolicUb[j * sourceLayerSize + indexA]; } else { _symbolicUb[j * _size + i] += - aUpper * ( sourceLayers[0] - ->getSymbolicLb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; - _symbolicUpperBias[i] += aUpper * ( sourceLayers[0]->getSymbolicLowerBias() )[0]; + aUpper * sourceSymbolicLb[j * sourceLayerSize + indexA]; } if ( bLower >= 0 ) { _symbolicLb[j * _size + i] += - bLower * ( sourceLayers[1] - ->getSymbolicLb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; - _symbolicUpperBias[i] += bLower * ( sourceLayers[1]->getSymbolicLowerBias() )[1]; + bLower * sourceSymbolicLb[j * sourceLayerSize + indexB]; } else { _symbolicLb[j * _size + i] += - bLower * ( sourceLayers[1] - ->getSymbolicUb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; - _symbolicUpperBias[i] += bLower * ( sourceLayers[1]->getSymbolicUpperBias() )[1]; + bLower * sourceSymbolicUb[j * sourceLayerSize + indexB]; } if ( bUpper >= 0 ) { _symbolicUb[j * _size + i] += - bUpper * ( sourceLayers[1] - ->getSymbolicUb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; - _symbolicUpperBias[i] += bUpper * ( sourceLayers[1]->getSymbolicUpperBias() )[1]; + bUpper * sourceSymbolicUb[j * sourceLayerSize + indexB]; } else { _symbolicUb[j * _size + i] += - bUpper * ( sourceLayers[1] - ->getSymbolicLb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; - _symbolicUpperBias[i] += bUpper * ( sourceLayers[1]->getSymbolicLowerBias() )[1]; + bUpper * sourceSymbolicLb[j * sourceLayerSize + indexB]; } } + _symbolicLowerBias[i]
= -coeffs[0] * sourceLbs[0] * sourceLbs[1] - + ( 1 - coeffs[0] ) * sourceUbs[0] * sourceUbs[1]; + _symbolicUpperBias[i] = -coeffs[1] * sourceLbs[0] * sourceUbs[1] - + ( 1 - coeffs[1] ) * sourceUbs[0] * sourceLbs[1]; + double lb = FloatUtils::infinity(); double ub = FloatUtils::negativeInfinity(); List values = { sourceLbs[0] * sourceLbs[1], @@ -4322,218 +4308,10 @@ void Layer::computeParameterisedSymbolicBoundsForBilinear( const Vector } } -const Vector Layer::OptimalParameterisedSymbolicBoundTightening() -{ - const Map &layers = _layerOwner->getLayerIndexToLayer(); - - // Search over coeffs in [0, 1]^number_of_parameters with projected gradient descent. - unsigned max_iterations = - GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS; - double step_size = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; - double epsilon = GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS; - double weight_decay = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_WEIGHT_DECAY; - double lr = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE; - unsigned dimension = getNumberOfParameters( layers ); - bool maximize = false; - double sign = ( maximize ? 1 : -1 ); - - Vector lower_bounds( dimension, 0 ); - Vector upper_bounds( dimension, 1 ); - - // Initialize the initial guess uniformly at random. - Vector guess( dimension ); - std::mt19937_64 rng( GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED ); - std::uniform_real_distribution dis( 0, 1 ); - for ( unsigned j = 0; j < dimension; ++j ) - { - double lb = lower_bounds[j]; - double ub = upper_bounds[j]; - guess[j] = lb + dis( rng ) * ( ub - lb ); - } - - Vector> candidates( dimension ); - Vector gradient( dimension ); - - for ( unsigned i = 0; i < max_iterations; ++i ) - { - double current_cost = EstimateVolume( guess ); - for ( unsigned j = 0; j < dimension; ++j ) - { - candidates[j] = Vector( guess ); - candidates[j][j] += step_size; - - if ( candidates[j][j] > upper_bounds[j] || candidates[j][j] < lower_bounds[j] ) - { - gradient[j] = 0; - continue; - } - - double cost = EstimateVolume( candidates[j] ); - gradient[j] = ( cost - current_cost ) / step_size + weight_decay * guess[j]; - } - - bool gradient_is_zero = true; - for ( unsigned j = 0; j < dimension; ++j ) - { - if ( FloatUtils::abs( gradient[j] ) > epsilon ) - { - gradient_is_zero = false; - } - } - if ( gradient_is_zero ) - { - break; - } - - for ( unsigned j = 0; j < dimension; ++j ) - { - guess[j] += sign * lr * gradient[j]; - - guess[j] = std::min( guess[j], upper_bounds[j] ); - guess[j] = std::max( guess[j], lower_bounds[j] ); - } - } - - const Vector optimal_coeffs( guess ); - return optimal_coeffs; -} - -double Layer::EstimateVolume( const Vector &coeffs ) -{ - // First, run parameterised symbolic bound propagation.
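// Editorial note (a restatement of the estimator below, not new logic): with
// N = GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS, the function computes
//
//     V ~= vol(inputBox) * (1/N) * sum_{k=1..N} sigmoid( M(x_k) )
//
// where each x_k is drawn uniformly from the input bounding box and
// M(x) = max( 0, max_j margin_j(x) ), since max_margin starts at zero; each
// margin_j(x) = max( ub_j - symbolicUb_j(x), symbolicLb_j(x) - lb_j ) comes
// from calculateDifferenceFromSymbolic() further down. vol(inputBox) is
// accumulated in log space, which avoids overflow on high-dimensional boxes.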
- const Map &layers = _layerOwner->getLayerIndexToLayer(); - Map> layerIndicesToParameters = - getParametersForLayers( layers, coeffs ); - for ( unsigned i = 0; i < layers.size(); ++i ) - { - ASSERT( layers.exists( i ) ); - const Vector ¤tLayerCoeffs = layerIndicesToParameters[i]; - layers[i]->computeParameterisedSymbolicBounds( currentLayerCoeffs ); - } - - std::mt19937_64 rng( GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED ); - double log_box_volume = 0; - double sigmoid_sum = 0; - - unsigned inputLayerIndex = 0; - unsigned outputLayerIndex = layers.size() - 1; - Layer *inputLayer = layers[inputLayerIndex]; - Layer *outputLayer = layers[outputLayerIndex]; - - // Calculate volume of input variables' bounding box. - for ( unsigned index = 0; index < inputLayer->getSize(); ++index ) - { - if ( inputLayer->neuronEliminated( index ) ) - continue; - - double lb = inputLayer->getLb( index ); - double ub = inputLayer->getUb( index ); - - if ( lb == ub ) - continue; - - log_box_volume += std::log( ub - lb ); - } - - for ( unsigned i = 0; i < GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; ++i ) - { - // Sample input point from known bounds. - Map point; - for ( unsigned j = 0; j < inputLayer->getSize(); ++j ) - { - if ( inputLayer->neuronEliminated( j ) ) - { - point.insert( j, 0 ); - } - else - { - double lb = inputLayer->getLb( j ); - double ub = inputLayer->getUb( j ); - std::uniform_real_distribution<> dis( lb, ub ); - point.insert( j, dis( rng ) ); - } - } - - // Calculate sigmoid of maximum margin from output symbolic bounds. - double max_margin = 0; - for ( unsigned j = 0; j < outputLayer->getSize(); ++j ) - { - if ( outputLayer->neuronEliminated( j ) ) - continue; - - double margin = outputLayer->calculateDifferenceFromSymbolic( point, j ); - max_margin = std::max( max_margin, margin ); - } - sigmoid_sum += SigmoidConstraint::sigmoid( max_margin ); - } - - return std::exp( log_box_volume + std::log( sigmoid_sum ) ) / - GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; -} - -double Layer::calculateDifferenceFromSymbolic( Map &point, unsigned i ) const -{ - double lowerSum = _symbolicLowerBias[i]; - double upperSum = _symbolicUpperBias[i]; - - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - lowerSum += _symbolicLb[j * _size + i] * point[j]; - upperSum += _symbolicUb[j * _size + i] * point[j]; - } - - return std::max( _ub[i] - upperSum, lowerSum - _lb[i] ); -} - - -unsigned Layer::getNumberOfParametersPerType( Type t ) const -{ - if ( t == Layer::RELU || t == Layer::LEAKY_RELU ) - return 1; - - if ( t == Layer::SIGN || t == Layer::BILINEAR ) - return 2; - - return 0; -} - -unsigned Layer::getNumberOfParameters( const Map &layers ) const -{ - unsigned index = 0; - for ( auto pair : layers ) - { - unsigned layerIndex = pair.first; - Layer *layer = layers[layerIndex]; - index += getNumberOfParametersPerType( layer->getLayerType() ); - } - return index; -} - -Map> Layer::getParametersForLayers( const Map &layers, - const Vector &coeffs ) const -{ - unsigned index = 0; - Map> layerIndicesToParameters; - for ( auto pair : layers ) - { - unsigned layerIndex = pair.first; - Layer *layer = layers[layerIndex]; - unsigned n_coeffs = getNumberOfParametersPerType( layer->getLayerType() ); - Vector current_coeffs( n_coeffs ); - for ( unsigned i = 0; i < n_coeffs; i++ ) - { - current_coeffs[i] = coeffs[index + i]; - } - layerIndicesToParameters.insert( layerIndex, current_coeffs ); - index += n_coeffs; - } - return layerIndicesToParameters; -} - -double Layer::softmaxLSELowerBound( const 
Vector &inputs, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) +double Layer::LSELowerBound( const Vector &inputs, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) { double sum = 0; for ( unsigned j = 0; j < inputs.size(); ++j ) @@ -4548,15 +4326,15 @@ double Layer::softmaxLSELowerBound( const Vector &inputs, return std::exp( inputs[i] ) / sum; } -double Layer::softmaxdLSELowerBound( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) +double Layer::dLSELowerBound( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) { double val = 0; if ( i == di ) - val += softmaxLSELowerBound( inputMids, inputLbs, inputUbs, i ); + val += LSELowerBound( inputMids, inputLbs, inputUbs, i ); double ldi = inputLbs[di]; double udi = inputUbs[di]; @@ -4578,10 +4356,10 @@ double Layer::softmaxdLSELowerBound( const Vector &inputMids, return val; } -double Layer::softmaxLSELowerBound2( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) +double Layer::LSELowerBound2( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) { double max = FloatUtils::negativeInfinity(); unsigned maxInputIndex = 0; @@ -4597,7 +4375,7 @@ double Layer::softmaxLSELowerBound2( const Vector &inputMids, } if ( maxInputIndex == i ) - return softmaxERLowerBound( inputMids, inputLbs, inputUbs, i ); + return ERLowerBound( inputMids, inputLbs, inputUbs, i ); else { double sum = 0; @@ -4620,11 +4398,11 @@ double Layer::softmaxLSELowerBound2( const Vector &inputMids, } } -double Layer::softmaxdLSELowerBound2( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) +double Layer::dLSELowerBound2( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) { double max = FloatUtils::negativeInfinity(); unsigned maxInputIndex = 0; @@ -4640,10 +4418,10 @@ double Layer::softmaxdLSELowerBound2( const Vector &inputMids, } if ( maxInputIndex == i ) - return softmaxdERLowerBound( inputMids, inputLbs, inputUbs, i, di ); + return dERLowerBound( inputMids, inputLbs, inputUbs, i, di ); else { - double val = softmaxLSELowerBound2( inputMids, inputLbs, inputUbs, i ); + double val = LSELowerBound2( inputMids, inputLbs, inputUbs, i ); double sum = 0; for ( unsigned j = 0; j < inputMids.size(); ++j ) @@ -4694,10 +4472,10 @@ double Layer::softmaxdLSELowerBound2( const Vector &inputMids, } } -double Layer::softmaxLSEUpperBound( const Vector &inputs, - const Vector &outputLb, - const Vector &outputUb, - unsigned i ) +double Layer::LSEUpperBound( const Vector &inputs, + const Vector &outputLb, + const Vector &outputUb, + unsigned i ) { double li = outputLb[i]; double ui = outputUb[i]; @@ -4710,11 +4488,11 @@ double Layer::softmaxLSEUpperBound( const Vector &inputs, SoftmaxConstraint::logSumOfExponential( inputTilda ) ); } -double Layer::softmaxdLSEUpperbound( const Vector &inputMids, - const Vector &outputLb, - const Vector &outputUb, - unsigned i, - unsigned di ) +double Layer::dLSEUpperbound( const Vector &inputMids, + const Vector &outputLb, + const Vector &outputUb, + unsigned i, + unsigned di ) { double li = outputLb[i]; double ui = outputUb[i]; @@ -4728,10 +4506,10 @@ double Layer::softmaxdLSEUpperbound( const Vector &inputMids, return val * val2; } -double Layer::softmaxERLowerBound( const Vector &inputs, - const Vector &inputLbs, - const Vector 
&inputUbs, - unsigned i ) +double Layer::ERLowerBound( const Vector &inputs, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) { Vector inputTilda; SoftmaxConstraint::xTilda( inputs, inputs[i], inputTilda ); @@ -4755,13 +4533,13 @@ double Layer::softmaxERLowerBound( const Vector &inputs, return 1 / sum; } -double Layer::softmaxdERLowerBound( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) +double Layer::dERLowerBound( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) { - double val = softmaxERLowerBound( inputMids, inputLbs, inputUbs, i ); + double val = ERLowerBound( inputMids, inputLbs, inputUbs, i ); if ( i != di ) { @@ -4786,10 +4564,10 @@ double Layer::softmaxdERLowerBound( const Vector &inputMids, } } -double Layer::softmaxERUpperBound( const Vector &inputs, - const Vector &outputLb, - const Vector &outputUb, - unsigned i ) +double Layer::ERUpperBound( const Vector &inputs, + const Vector &outputLb, + const Vector &outputUb, + unsigned i ) { double li = outputLb[i]; double ui = outputUb[i]; @@ -4800,15 +4578,16 @@ double Layer::softmaxERUpperBound( const Vector &inputs, return ui + li - ui * li * SoftmaxConstraint::sumOfExponential( inputTilda ); } -double Layer::softmaxdERUpperBound( const Vector &inputMids, - const Vector &outputLb, - const Vector &outputUb, - unsigned i, - unsigned di ) +double Layer::dERUpperBound( const Vector &inputMids, + const Vector &outputLb, + const Vector &outputUb, + unsigned i, + unsigned di ) { double li = outputLb[i]; double ui = outputUb[i]; + if ( i == di ) { double val2 = -1; @@ -4820,9 +4599,9 @@ double Layer::softmaxdERUpperBound( const Vector &inputMids, return -li * ui * std::exp( inputMids[di] - inputMids[i] ); } -double Layer::softmaxLinearLowerBound( const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) +double Layer::linearLowerBound( const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) { Vector uTilda; SoftmaxConstraint::xTilda( inputUbs, inputLbs[i], uTilda ); @@ -4830,9 +4609,9 @@ double Layer::softmaxLinearLowerBound( const Vector &inputLbs, return 1 / SoftmaxConstraint::sumOfExponential( uTilda ); } -double Layer::softmaxLinearUpperBound( const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) +double Layer::linearUpperBound( const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) { Vector lTilda; SoftmaxConstraint::xTilda( inputLbs, inputUbs[i], lTilda ); From 8ee3fc6f8a9b492addb11d7a0e124b2d5ffed569 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Wed, 17 Sep 2025 21:12:13 +0300 Subject: [PATCH 75/81] Delete src/LPFormulator.cpp --- src/LPFormulator.cpp | 2217 ------------------------------------------ 1 file changed, 2217 deletions(-) delete mode 100644 src/LPFormulator.cpp diff --git a/src/LPFormulator.cpp b/src/LPFormulator.cpp deleted file mode 100644 index 67f2725155..0000000000 --- a/src/LPFormulator.cpp +++ /dev/null @@ -1,2217 +0,0 @@ -/********************* */ -/*! \file LPFormulator.cpp - ** \verbatim - ** Top contributors (to current version): - ** Guy Katz - ** This file is part of the Marabou project. - ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS - ** in the top-level source directory) and their institutional affiliations. - ** All rights reserved. 
See the file COPYING in the top-level source - ** directory for licensing information.\endverbatim - ** - ** [[ Add lengthier description here ]] - - **/ - -#include "LPFormulator.h" - -#include "DeepPolySoftmaxElement.h" -#include "GurobiWrapper.h" -#include "InfeasibleQueryException.h" -#include "Layer.h" -#include "MStringf.h" -#include "NLRError.h" -#include "Options.h" -#include "TimeUtils.h" -#include "Vector.h" - -namespace NLR { - -LPFormulator::LPFormulator( LayerOwner *layerOwner ) - : _layerOwner( layerOwner ) - , _cutoffInUse( false ) - , _cutoffValue( 0 ) -{ -} - -LPFormulator::~LPFormulator() -{ -} - -double LPFormulator::solveLPRelaxation( GurobiWrapper &gurobi, - const Map &layers, - MinOrMax minOrMax, - String variableName, - unsigned lastLayer ) -{ - gurobi.resetModel(); - createLPRelaxation( layers, gurobi, lastLayer ); - return optimizeWithGurobi( gurobi, minOrMax, variableName, _cutoffValue ); -} - -double LPFormulator::optimizeWithGurobi( GurobiWrapper &gurobi, - MinOrMax minOrMax, - String variableName, - double cutoffValue, - std::atomic_bool *infeasible ) -{ - List terms; - terms.append( GurobiWrapper::Term( 1, variableName ) ); - - if ( minOrMax == MAX ) - gurobi.setObjective( terms ); - else - gurobi.setCost( terms ); - - gurobi.setTimeLimit( FloatUtils::infinity() ); - - gurobi.solve(); - - if ( gurobi.infeasible() ) - { - if ( infeasible ) - { - *infeasible = true; - return FloatUtils::infinity(); - } - else - throw InfeasibleQueryException(); - } - - if ( gurobi.cutoffOccurred() ) - return cutoffValue; - - if ( gurobi.optimal() ) - { - Map dontCare; - double result = 0; - gurobi.extractSolution( dontCare, result ); - return result; - } - else if ( gurobi.timeout() ) - { - return gurobi.getObjectiveBound(); - } - - throw NLRError( NLRError::UNEXPECTED_RETURN_STATUS_FROM_GUROBI ); -} - -void LPFormulator::optimizeBoundsWithIncrementalLpRelaxation( const Map &layers ) -{ - GurobiWrapper gurobi; - - List terms; - Map dontCare; - double lb = 0; - double ub = 0; - double currentLb = 0; - double currentUb = 0; - - unsigned tighterBoundCounter = 0; - unsigned signChanges = 0; - unsigned cutoffs = 0; - - struct timespec gurobiStart; - (void)gurobiStart; - struct timespec gurobiEnd; - (void)gurobiEnd; - - gurobiStart = TimeUtils::sampleMicro(); - - for ( unsigned i = 0; i < _layerOwner->getNumberOfLayers(); ++i ) - { - /* - Go over the layers, one by one. 
Each time encode the layer, - and then issue queries on each of its variables - */ - ASSERT( layers.exists( i ) ); - Layer *layer = layers[i]; - addLayerToModel( gurobi, layer, false ); - - for ( unsigned j = 0; j < layer->getSize(); ++j ) - { - if ( layer->neuronEliminated( j ) ) - continue; - - currentLb = layer->getLb( j ); - currentUb = layer->getUb( j ); - - if ( _cutoffInUse && ( currentLb >= _cutoffValue || currentUb <= _cutoffValue ) ) - continue; - - unsigned variable = layer->neuronToVariable( j ); - Stringf variableName( "x%u", variable ); - - terms.clear(); - terms.append( GurobiWrapper::Term( 1, variableName ) ); - - // Maximize - gurobi.reset(); - gurobi.setObjective( terms ); - gurobi.solve(); - - if ( gurobi.infeasible() ) - throw InfeasibleQueryException(); - - if ( gurobi.cutoffOccurred() ) - { - ub = _cutoffValue; - } - else if ( gurobi.optimal() ) - { - gurobi.extractSolution( dontCare, ub ); - } - else if ( gurobi.timeout() ) - { - ub = gurobi.getObjectiveBound(); - } - else - { - throw NLRError( NLRError::UNEXPECTED_RETURN_STATUS_FROM_GUROBI ); - } - - // If the bound is tighter, store it - if ( ub < currentUb ) - { - gurobi.setUpperBound( variableName, ub ); - - if ( FloatUtils::isPositive( currentUb ) && !FloatUtils::isPositive( ub ) ) - ++signChanges; - - layer->setUb( j, ub ); - _layerOwner->receiveTighterBound( Tightening( variable, ub, Tightening::UB ) ); - ++tighterBoundCounter; - - if ( _cutoffInUse && ub < _cutoffValue ) - { - ++cutoffs; - continue; - } - } - - // Minimize - gurobi.reset(); - gurobi.setCost( terms ); - gurobi.solve(); - - if ( gurobi.infeasible() ) - throw InfeasibleQueryException(); - - if ( gurobi.cutoffOccurred() ) - { - lb = _cutoffValue; - } - else if ( gurobi.optimal() ) - { - gurobi.extractSolution( dontCare, lb ); - } - else if ( gurobi.timeout() ) - { - lb = gurobi.getObjectiveBound(); - } - else - { - throw NLRError( NLRError::UNEXPECTED_RETURN_STATUS_FROM_GUROBI ); - } - - // If the bound is tighter, store it - if ( lb > currentLb ) - { - gurobi.setLowerBound( variableName, lb ); - - if ( FloatUtils::isNegative( currentLb ) && !FloatUtils::isNegative( lb ) ) - ++signChanges; - - layer->setLb( j, lb ); - _layerOwner->receiveTighterBound( Tightening( variable, lb, Tightening::LB ) ); - ++tighterBoundCounter; - - if ( _cutoffInUse && lb >= _cutoffValue ) - { - ++cutoffs; - continue; - } - } - } - } - - gurobiEnd = TimeUtils::sampleMicro(); - - LPFormulator_LOG( - Stringf( "Number of tighter bounds found by Gurobi: %u. Sign changes: %u. Cutoffs: %u\n", - tighterBoundCounter, - signChanges, - cutoffs ) - .ascii() ); - LPFormulator_LOG( Stringf( "Seconds spent Gurobiing: %llu\n", - TimeUtils::timePassed( gurobiStart, gurobiEnd ) / 1000000 ) - .ascii() ); -} - -void LPFormulator::optimizeBoundsWithLpRelaxation( - const Map &layers, - bool backward, - const Map> &layerIndicesToParameters, - const Vector &polygonalTightenings ) -{ - unsigned numberOfWorkers = Options::get()->getInt( Options::NUM_WORKERS ); - - Map solverToIndex; - // Create a queue of free workers - // When a worker is working, it is popped off the queue, when it is done, it - // is added back to the queue. 
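// Editorial sketch of the pop/enqueue discipline this file uses around each
// bound query (the identifiers are the file's own; the elided middle stands
// for encoding the relaxation and optimising one variable):
//
//     GurobiWrapper *solver;
//     while ( !freeSolvers.pop( solver ) )    // wait for an idle solver
//         boost::this_thread::sleep_for( waitTime );
//     ... encode the LP relaxation and tighten one variable's bound ...
//     enqueueSolver( freeSolvers, solver );   // return the solver to the pool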
- SolverQueue freeSolvers( numberOfWorkers ); - for ( unsigned i = 0; i < numberOfWorkers; ++i ) - { - GurobiWrapper *gurobi = new GurobiWrapper(); - solverToIndex[gurobi] = i; - enqueueSolver( freeSolvers, gurobi ); - } - - boost::thread *threads = new boost::thread[numberOfWorkers]; - std::mutex mtx; - std::atomic_bool infeasible( false ); - - std::atomic_uint tighterBoundCounter( 0 ); - std::atomic_uint signChanges( 0 ); - std::atomic_uint cutoffs( 0 ); - - struct timespec gurobiStart; - (void)gurobiStart; - struct timespec gurobiEnd; - (void)gurobiEnd; - - gurobiStart = TimeUtils::sampleMicro(); - - unsigned startIndex = backward ? layers.size() - 1 : 0; - unsigned endIndex = backward ? 0 : layers.size(); - for ( unsigned layerIndex = startIndex; layerIndex != endIndex; - backward ? --layerIndex : ++layerIndex ) - { - LPFormulator_LOG( Stringf( "Tightening bound for layer %u...", layerIndex ).ascii() ); - Layer *layer = layers[layerIndex]; - - ThreadArgument argument( layer, - &layers, - std::ref( freeSolvers ), - std::ref( mtx ), - std::ref( infeasible ), - std::ref( tighterBoundCounter ), - std::ref( signChanges ), - std::ref( cutoffs ), - layer->getLayerIndex(), - layerIndex, - threads, - &solverToIndex ); - - // optimize every neuron of layer - optimizeBoundsOfNeuronsWithLpRelaxation( - argument, backward, layerIndicesToParameters, polygonalTightenings ); - LPFormulator_LOG( Stringf( "Tightening bound for layer %u - done", layerIndex ).ascii() ); - } - - for ( unsigned i = 0; i < numberOfWorkers; ++i ) - { - threads[i].join(); - } - - gurobiEnd = TimeUtils::sampleMicro(); - - LPFormulator_LOG( - Stringf( "Number of tighter bounds found by Gurobi: %u. Sign changes: %u. Cutoffs: %u\n", - tighterBoundCounter.load(), - signChanges.load(), - cutoffs.load() ) - .ascii() ); - LPFormulator_LOG( Stringf( "Seconds spent Gurobiing: %llu\n", - TimeUtils::timePassed( gurobiStart, gurobiEnd ) / 1000000 ) - .ascii() ); - - // Clean up - clearSolverQueue( freeSolvers ); - - if ( threads ) - { - delete[] threads; - threads = NULL; - } - - if ( infeasible ) - throw InfeasibleQueryException(); -} - -void LPFormulator::optimizeBoundsOfOneLayerWithLpRelaxation( const Map &layers, - unsigned targetIndex ) -{ - unsigned numberOfWorkers = Options::get()->getInt( Options::NUM_WORKERS ); - - Map solverToIndex; - // Create a queue of free workers - // When a worker is working, it is popped off the queue, when it is done, it - // is added back to the queue. 
- SolverQueue freeSolvers( numberOfWorkers ); - for ( unsigned i = 0; i < numberOfWorkers; ++i ) - { - GurobiWrapper *gurobi = new GurobiWrapper(); - solverToIndex[gurobi] = i; - enqueueSolver( freeSolvers, gurobi ); - } - - boost::thread *threads = new boost::thread[numberOfWorkers]; - std::mutex mtx; - std::atomic_bool infeasible( false ); - - std::atomic_uint tighterBoundCounter( 0 ); - std::atomic_uint signChanges( 0 ); - std::atomic_uint cutoffs( 0 ); - - struct timespec gurobiStart; - (void)gurobiStart; - struct timespec gurobiEnd; - (void)gurobiEnd; - - gurobiStart = TimeUtils::sampleMicro(); - - Layer *layer = layers[targetIndex]; - - ThreadArgument argument( layer, - &layers, - std::ref( freeSolvers ), - std::ref( mtx ), - std::ref( infeasible ), - std::ref( tighterBoundCounter ), - std::ref( signChanges ), - std::ref( cutoffs ), - layers.size() - 1, - targetIndex, - threads, - &solverToIndex ); - - // optimize every neuron of layer - optimizeBoundsOfNeuronsWithLpRelaxation( argument, false ); - - for ( unsigned i = 0; i < numberOfWorkers; ++i ) - { - threads[i].join(); - } - - gurobiEnd = TimeUtils::sampleMicro(); - - LPFormulator_LOG( - Stringf( "Number of tighter bounds found by Gurobi: %u. Sign changes: %u. Cutoffs: %u\n", - tighterBoundCounter.load(), - signChanges.load(), - cutoffs.load() ) - .ascii() ); - LPFormulator_LOG( Stringf( "Seconds spent Gurobiing: %llu\n", - TimeUtils::timePassed( gurobiStart, gurobiEnd ) / 1000000 ) - .ascii() ); - - clearSolverQueue( freeSolvers ); - - if ( infeasible ) - throw InfeasibleQueryException(); -} - -void LPFormulator::optimizeBoundsOfNeuronsWithLpRelaxation( - ThreadArgument &args, - bool backward, - const Map> &layerIndicesToParameters, - const Vector &polygonalTightenings ) -{ - unsigned numberOfWorkers = Options::get()->getInt( Options::NUM_WORKERS ); - - // Time to wait if no idle worker is available - boost::chrono::milliseconds waitTime( numberOfWorkers - 1 ); - - Layer *layer = args._layer; - const Map layers = *args._layers; - unsigned targetIndex = args._targetIndex; - unsigned lastIndexOfRelaxation = args._lastIndexOfRelaxation; - - const Map solverToIndex = *args._solverToIndex; - SolverQueue &freeSolvers = args._freeSolvers; - std::mutex &mtx = args._mtx; - std::atomic_bool &infeasible = args._infeasible; - std::atomic_uint &tighterBoundCounter = args._tighterBoundCounter; - std::atomic_uint &signChanges = args._signChanges; - std::atomic_uint &cutoffs = args._cutoffs; - boost::thread *threads = args._threads; - - double currentLb; - double currentUb; - - bool skipTightenLb = false; // If true, skip lower bound tightening - bool skipTightenUb = false; // If true, skip upper bound tightening - - // Declare simulations as a local variable to avoid problems that can arise from - // multi-threaded access. - const Vector> *simulations = - _layerOwner->getLayer( targetIndex )->getSimulations(); - - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( layer->neuronEliminated( i ) ) - continue; - - currentLb = layer->getLb( i ); - currentUb = layer->getUb( i ); - - if ( _cutoffInUse && ( currentLb >= _cutoffValue || currentUb <= _cutoffValue ) ) - continue; - - skipTightenLb = false; - skipTightenUb = false; - - // Loop for simulation - for ( const auto &simValue : ( *simulations ).get( i ) ) - { - if ( _cutoffInUse && _cutoffValue < simValue ) // If x_lower < 0 < x_sim, do not try to - // call tightening upper bound.
- skipTightenUb = true; - - if ( _cutoffInUse && simValue < _cutoffValue ) // If x_sim < 0 < x_upper, do not try to - // call tightening lower bound. - skipTightenLb = true; - - if ( skipTightenUb && skipTightenLb ) - break; - } - - // If no tightening is needed, continue - if ( skipTightenUb && skipTightenLb ) - { - LPFormulator_LOG( - Stringf( - "Skip tightening lower and upper bounds for layer %u index %u", targetIndex, i ) - .ascii() ); - continue; - } - else if ( skipTightenUb ) - { - LPFormulator_LOG( - Stringf( "Skip tightening upper bound for layer %u index %u", targetIndex, i ) - .ascii() ); - } - else if ( skipTightenLb ) - { - LPFormulator_LOG( - Stringf( "Skip tightening lower bound for layer %u index %u", targetIndex, i ) - .ascii() ); - } - - if ( infeasible ) - { - // infeasibility is derived, interrupt all active threads - for ( unsigned i = 0; i < numberOfWorkers; ++i ) - { - threads[i].interrupt(); - threads[i].join(); - } - - // Clean up - clearSolverQueue( freeSolvers ); - - if ( threads ) - { - delete[] threads; - threads = NULL; - } - - throw InfeasibleQueryException(); - } - - // Wait until there is an idle solver - GurobiWrapper *freeSolver; - while ( !freeSolvers.pop( freeSolver ) ) - boost::this_thread::sleep_for( waitTime ); - - freeSolver->resetModel(); - - mtx.lock(); - if ( backward ) - createLPRelaxationAfter( layers, - *freeSolver, - lastIndexOfRelaxation, - layerIndicesToParameters, - polygonalTightenings ); - else - createLPRelaxation( layers, - *freeSolver, - lastIndexOfRelaxation, - layerIndicesToParameters, - polygonalTightenings ); - mtx.unlock(); - - // spawn a thread to tighten the bounds for the current variable - ThreadArgument argument( freeSolver, - layer, - i, - currentLb, - currentUb, - _cutoffInUse, - _cutoffValue, - _layerOwner, - std::ref( freeSolvers ), - std::ref( mtx ), - std::ref( infeasible ), - std::ref( tighterBoundCounter ), - std::ref( signChanges ), - std::ref( cutoffs ), - skipTightenLb, - skipTightenUb ); - - if ( numberOfWorkers == 1 ) - tightenSingleVariableBoundsWithLPRelaxation( argument ); - else - threads[solverToIndex[freeSolver]] = - boost::thread( tightenSingleVariableBoundsWithLPRelaxation, argument ); - } -} - -void LPFormulator::tightenSingleVariableBoundsWithLPRelaxation( ThreadArgument &argument ) -{ - try - { - GurobiWrapper *gurobi = argument._gurobi; - Layer *layer = argument._layer; - unsigned index = argument._index; - double currentLb = argument._currentLb; - double currentUb = argument._currentUb; - bool cutoffInUse = argument._cutoffInUse; - double cutoffValue = argument._cutoffValue; - LayerOwner *layerOwner = argument._layerOwner; - SolverQueue &freeSolvers = argument._freeSolvers; - std::mutex &mtx = argument._mtx; - std::atomic_bool &infeasible = argument._infeasible; - std::atomic_uint &tighterBoundCounter = argument._tighterBoundCounter; - std::atomic_uint &signChanges = argument._signChanges; - std::atomic_uint &cutoffs = argument._cutoffs; - bool skipTightenLb = argument._skipTightenLb; - bool skipTightenUb = argument._skipTightenUb; - - LPFormulator_LOG( - Stringf( "Tightening bounds for layer %u index %u", layer->getLayerIndex(), index ) - .ascii() ); - - unsigned variable = layer->neuronToVariable( index ); - Stringf variableName( "x%u", variable ); - - if ( !skipTightenUb ) - { - LPFormulator_LOG( Stringf( "Computing upperbound..."
).ascii() ); - double ub = optimizeWithGurobi( - *gurobi, MinOrMax::MAX, variableName, cutoffValue, &infeasible ) + - GlobalConfiguration::LP_TIGHTENING_ROUNDING_CONSTANT; - ; - LPFormulator_LOG( Stringf( "Upperbound computed %f", ub ).ascii() ); - - // Store the new bound if it is tighter - if ( ub < currentUb ) - { - if ( FloatUtils::isPositive( currentUb ) && !FloatUtils::isPositive( ub ) ) - ++signChanges; - - mtx.lock(); - layer->setUb( index, ub ); - layerOwner->receiveTighterBound( Tightening( variable, ub, Tightening::UB ) ); - mtx.unlock(); - - ++tighterBoundCounter; - - if ( cutoffInUse && ub < cutoffValue ) - { - ++cutoffs; - enqueueSolver( freeSolvers, gurobi ); - return; - } - } - } - - if ( !skipTightenLb ) - { - LPFormulator_LOG( Stringf( "Computing lowerbound..." ).ascii() ); - gurobi->reset(); - double lb = optimizeWithGurobi( - *gurobi, MinOrMax::MIN, variableName, cutoffValue, &infeasible ) - - GlobalConfiguration::LP_TIGHTENING_ROUNDING_CONSTANT; - LPFormulator_LOG( Stringf( "Lowerbound computed: %f", lb ).ascii() ); - // Store the new bound if it is tighter - if ( lb > currentLb ) - { - if ( FloatUtils::isNegative( currentLb ) && !FloatUtils::isNegative( lb ) ) - ++signChanges; - - mtx.lock(); - layer->setLb( index, lb ); - layerOwner->receiveTighterBound( Tightening( variable, lb, Tightening::LB ) ); - mtx.unlock(); - ++tighterBoundCounter; - - if ( cutoffInUse && lb > cutoffValue ) - ++cutoffs; - } - } - enqueueSolver( freeSolvers, gurobi ); - } - catch ( boost::thread_interrupted & ) - { - enqueueSolver( argument._freeSolvers, argument._gurobi ); - } -} - -void LPFormulator::createLPRelaxation( - const Map &layers, - GurobiWrapper &gurobi, - unsigned lastLayer, - const Map> &layerIndicesToParameters, - const Vector &polygonalTightenings ) -{ - for ( const auto &layer : layers ) - { - unsigned currentLayerIndex = layer.second->getLayerIndex(); - if ( currentLayerIndex > lastLayer ) - continue; - - if ( layerIndicesToParameters.empty() ) - addLayerToModel( gurobi, layer.second, false ); - else - { - const Vector ¤tLayerCoeffs = layerIndicesToParameters[currentLayerIndex]; - addLayerToParameterisedModel( gurobi, layer.second, false, currentLayerCoeffs ); - } - } - addPolyognalTighteningsToLpRelaxation( gurobi, layers, 0, lastLayer, polygonalTightenings ); -} - -void LPFormulator::createLPRelaxationAfter( - const Map &layers, - GurobiWrapper &gurobi, - unsigned firstLayer, - const Map> &layerIndicesToParameters, - const Vector &polygonalTightenings ) -{ - unsigned depth = GlobalConfiguration::BACKWARD_BOUND_PROPAGATION_DEPTH; - std::priority_queue, std::greater> layersToAdd; - Map layerToDepth; - - layersToAdd.push( firstLayer ); - layerToDepth[firstLayer] = 0; - while ( !layersToAdd.empty() ) - { - unsigned currentLayerIndex = layersToAdd.top(); - Layer *currentLayer = layers[currentLayerIndex]; - unsigned currentDepth = layerToDepth[currentLayerIndex]; - layersToAdd.pop(); - if ( currentDepth > depth ) - continue; - else - { - if ( layerIndicesToParameters.empty() ) - addLayerToModel( gurobi, currentLayer, true ); - else - { - const Vector ¤tLayerCoeffs = - layerIndicesToParameters[currentLayerIndex]; - addLayerToParameterisedModel( gurobi, currentLayer, true, currentLayerCoeffs ); - } - - for ( const auto &nextLayer : currentLayer->getSuccessorLayers() ) - { - if ( layerToDepth.exists( nextLayer ) ) - continue; - layersToAdd.push( nextLayer ); - layerToDepth[nextLayer] = currentDepth + 1; - } - } - } - addPolyognalTighteningsToLpRelaxation( - gurobi, layers, 
firstLayer, layersToAdd.top(), polygonalTightenings ); -} - -void LPFormulator::addLayerToModel( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables ) -{ - switch ( layer->getLayerType() ) - { - case Layer::INPUT: - addInputLayerToLpRelaxation( gurobi, layer ); - break; - - case Layer::RELU: - addReluLayerToLpRelaxation( gurobi, layer, createVariables ); - break; - - case Layer::WEIGHTED_SUM: - addWeightedSumLayerToLpRelaxation( gurobi, layer, createVariables ); - break; - - case Layer::ROUND: - addRoundLayerToLpRelaxation( gurobi, layer, createVariables ); - break; - - case Layer::LEAKY_RELU: - addLeakyReluLayerToLpRelaxation( gurobi, layer, createVariables ); - break; - - case Layer::ABSOLUTE_VALUE: - addAbsoluteValueLayerToLpRelaxation( gurobi, layer, createVariables ); - break; - - case Layer::SIGN: - addSignLayerToLpRelaxation( gurobi, layer, createVariables ); - break; - - case Layer::MAX: - addMaxLayerToLpRelaxation( gurobi, layer, createVariables ); - break; - - case Layer::SIGMOID: - addSigmoidLayerToLpRelaxation( gurobi, layer, createVariables ); - break; - - case Layer::SOFTMAX: - addSoftmaxLayerToLpRelaxation( gurobi, layer, createVariables ); - break; - - case Layer::BILINEAR: - addBilinearLayerToLpRelaxation( gurobi, layer, createVariables ); - break; - - default: - throw NLRError( NLRError::LAYER_TYPE_NOT_SUPPORTED, "LPFormulator" ); - break; - } -} - -void LPFormulator::addInputLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer ) -{ - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - unsigned variable = layer->neuronToVariable( i ); - gurobi.addVariable( Stringf( "x%u", variable ), layer->getLb( i ), layer->getUb( i ) ); - } -} - -void LPFormulator::addReluLayerToLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables ) -{ - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( !layer->neuronEliminated( i ) ) - { - unsigned targetVariable = layer->neuronToVariable( i ); - - List sources = layer->getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - unsigned sourceNeuron = sources.begin()->_neuron; - - if ( sourceLayer->neuronEliminated( sourceNeuron ) ) - { - // If the source neuron has been eliminated, this neuron is constant - double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - double targetValue = sourceValue > 0 ? 
sourceValue : 0; - - gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); - - continue; - } - - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - String sourceName = Stringf( "x%u", sourceVariable ); - if ( createVariables && !gurobi.containsVariable( sourceName ) ) - gurobi.addVariable( sourceName, sourceLb, sourceUb ); - - gurobi.addVariable( Stringf( "x%u", targetVariable ), 0, layer->getUb( i ) ); - - if ( !FloatUtils::isNegative( sourceLb ) ) - { - // The ReLU is active, y = x - if ( sourceLb < 0 ) - sourceLb = 0; - - List terms; - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addEqConstraint( terms, 0 ); - } - else if ( !FloatUtils::isPositive( sourceUb ) ) - { - // The ReLU is inactive, y = 0 - List terms; - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - gurobi.addEqConstraint( terms, 0 ); - } - else - { - /* - The phase of this ReLU is not yet fixed. - - For y = ReLU(x), we add the following triangular relaxation: - - 1. y >= 0 - 2. y >= x - 3. y is below the line that crosses (x.lb,0) and (x.ub,x.ub) - */ - - // y >= 0 - List terms; - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - gurobi.addGeqConstraint( terms, 0 ); - - // y >= x, i.e. y - x >= 0 - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, 0 ); - - /* - u ul - y <= ----- x - ----- - u - l u - l - */ - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -sourceUb / ( sourceUb - sourceLb ), - Stringf( "x%u", sourceVariable ) ) ); - gurobi.addLeqConstraint( terms, - ( -sourceUb * sourceLb ) / ( sourceUb - sourceLb ) ); - } - } - } -} - -void LPFormulator::addRoundLayerToLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables ) -{ - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( !layer->neuronEliminated( i ) ) - { - unsigned targetVariable = layer->neuronToVariable( i ); - - List sources = layer->getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - unsigned sourceNeuron = sources.begin()->_neuron; - - if ( sourceLayer->neuronEliminated( sourceNeuron ) ) - { - // If the source neuron has been eliminated, this neuron is constant - double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - double targetValue = FloatUtils::round( sourceValue ); - - gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); - - continue; - } - - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - String sourceName = Stringf( "x%u", sourceVariable ); - if ( createVariables && !gurobi.containsVariable( sourceName ) ) - gurobi.addVariable( sourceName, sourceLb, sourceUb ); - - double ub = std::min( FloatUtils::round( sourceUb ), layer->getUb( i ) ); - double lb = std::max( FloatUtils::round( sourceLb ), layer->getLb( i ) ); - - gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); - - // If u = l: y =
round(u) - if ( FloatUtils::areEqual( sourceUb, sourceLb ) ) - { - List terms; - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - gurobi.addEqConstraint( terms, ub ); - } - - else - { - List terms; - // y <= x + 0.5, i.e. y - x <= 0.5 - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addLeqConstraint( terms, 0.5 ); - - // y >= x - 0.5, i.e. y - x >= -0.5 - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, -0.5 ); - } - } - } -} - -void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables ) -{ - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( !layer->neuronEliminated( i ) ) - { - unsigned targetVariable = layer->neuronToVariable( i ); - - List sources = layer->getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - unsigned sourceNeuron = sources.begin()->_neuron; - - if ( sourceLayer->neuronEliminated( sourceNeuron ) ) - { - // If the source neuron has been eliminated, this neuron is constant - double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - double targetValue = sourceValue > 0 ? sourceValue : -sourceValue; - - gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); - - continue; - } - - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - String sourceName = Stringf( "x%u", sourceVariable ); - if ( createVariables && !gurobi.containsVariable( sourceName ) ) - gurobi.addVariable( sourceName, sourceLb, sourceUb ); - - if ( !FloatUtils::isNegative( sourceLb ) ) - { - // The AbsoluteValue is active, y = x - if ( sourceLb < 0 ) - sourceLb = 0; - - double ub = std::min( sourceUb, layer->getUb( i ) ); - double lb = std::max( sourceLb, layer->getLb( i ) ); - gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); - - List terms; - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addEqConstraint( terms, 0 ); - } - else if ( !FloatUtils::isPositive( sourceUb ) ) - { - double ub = std::min( -sourceLb, layer->getUb( i ) ); - double lb = std::max( -sourceUb, layer->getLb( i ) ); - gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); - - // The AbsoluteValue is inactive, y = -x, i.e. y + x = 0 - List terms; - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addEqConstraint( terms, 0 ); - } - else - { - double ub = std::min( std::max( -sourceLb, sourceUb ), layer->getUb( i ) ); - double lb = std::max( 0.0, layer->getLb( i ) ); - gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); - - // The phase of this AbsoluteValue is not yet fixed, 0 <= y <= max(-lb, ub). 
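// Editorial worked example for this unfixed-phase case: with source bounds
// l = -3 and u = 2, |x| ranges over [0, max(-l, u)] = [0, 3], so the clipping
// above yields y in [max(0.0, layer lb), min(3, layer ub)], i.e. [0, 3] when
// the layer's own stored bounds are loose.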
- // y >= 0 - List terms; - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - gurobi.addGeqConstraint( terms, 0 ); - - // y <= max(-lb, ub) - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - gurobi.addLeqConstraint( terms, ub ); - } - } - } -} - -void LPFormulator::addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables ) -{ - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( !layer->neuronEliminated( i ) ) - { - unsigned targetVariable = layer->neuronToVariable( i ); - - List sources = layer->getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - unsigned sourceNeuron = sources.begin()->_neuron; - - if ( sourceLayer->neuronEliminated( sourceNeuron ) ) - { - // If the source neuron has been eliminated, this neuron is constant - double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - double targetValue = SigmoidConstraint::sigmoid( sourceValue ); - - gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); - - continue; - } - - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - String sourceName = Stringf( "x%u", sourceVariable ); - if ( createVariables && !gurobi.containsVariable( sourceName ) ) - gurobi.addVariable( sourceName, sourceLb, sourceUb ); - - double sourceUbSigmoid = SigmoidConstraint::sigmoid( sourceUb ); - double sourceLbSigmoid = SigmoidConstraint::sigmoid( sourceLb ); - - double ub = std::min( sourceUbSigmoid, layer->getUb( i ) ); - double lb = std::max( sourceLbSigmoid, layer->getLb( i ) ); - - gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); - - // If u = l: y = sigmoid(u) - if ( FloatUtils::areEqual( sourceUb, sourceLb ) ) - { - List terms; - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - gurobi.addEqConstraint( terms, ub ); - } - - else - { - List terms; - double lambda = ( ub - lb ) / ( sourceUb - sourceLb ); - double lambdaPrime = std::min( SigmoidConstraint::sigmoidDerivative( sourceLb ), - SigmoidConstraint::sigmoidDerivative( sourceUb ) ); - - // update lower bound - if ( FloatUtils::isPositive( sourceLb ) ) - { - // y >= lambda * (x - l) + sigmoid(lb), i.e. y - lambda * x >= sigmoid(lb) - - // lambda * l - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( - GurobiWrapper::Term( -lambda, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, sourceLbSigmoid - sourceLb * lambda ); - } - - else - { - // y >= lambda' * (x - l) + sigmoid(lb), i.e. y - lambda' * x >= sigmoid(lb) - - // lambda' * l - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( - GurobiWrapper::Term( -lambdaPrime, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, sourceLbSigmoid - sourceLb * lambdaPrime ); - } - - // update upper bound - if ( !FloatUtils::isPositive( sourceUb ) ) - { - // y <= lambda * (x - u) + sigmoid(ub), i.e. 
y - lambda * x <= sigmoid(ub) - - // lambda * u - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( - GurobiWrapper::Term( -lambda, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addLeqConstraint( terms, sourceUbSigmoid - sourceUb * lambda ); - } - else - { - // y <= lambda' * (x - u) + sigmoid(ub), i.e. y - lambda' * x <= sigmoid(ub) - - // lambda' * u - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( - GurobiWrapper::Term( -lambdaPrime, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addLeqConstraint( terms, sourceUbSigmoid - sourceUb * lambdaPrime ); - } - } - } - } -} - -void LPFormulator::addSignLayerToLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables ) -{ - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( layer->neuronEliminated( i ) ) - continue; - - unsigned targetVariable = layer->neuronToVariable( i ); - - List sources = layer->getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - unsigned sourceNeuron = sources.begin()->_neuron; - - if ( sourceLayer->neuronEliminated( sourceNeuron ) ) - { - // If the source neuron has been eliminated, this neuron is constant - double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - double targetValue = FloatUtils::isNegative( sourceValue ) ? -1 : 1; - - gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); - - continue; - } - - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - String sourceName = Stringf( "x%u", sourceVariable ); - if ( createVariables && !gurobi.containsVariable( sourceName ) ) - gurobi.addVariable( sourceName, sourceLb, sourceUb ); - - if ( !FloatUtils::isNegative( sourceLb ) ) - { - // The Sign is positive, y = 1 - gurobi.addVariable( Stringf( "x%u", targetVariable ), 1, 1 ); - } - else if ( FloatUtils::isNegative( sourceUb ) ) - { - // The Sign is negative, y = -1 - gurobi.addVariable( Stringf( "x%u", targetVariable ), -1, -1 ); - } - else - { - /* - The phase of this Sign is not yet fixed. - - For y = Sign(x), we add the following parallelogram relaxation: - - 1. y >= -1 - 2. y <= 1 - 3. y is below the line that crosses (x.lb,-1) and (0,1) - 4.
y is above the line that crosses (0,-1) and (x.ub,1) - */ - - // -1 <= y <= 1 - gurobi.addVariable( Stringf( "x%u", targetVariable ), -1, 1 ); - - /* - 2 - y <= ----- x + 1 - - l - */ - List terms; - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( 2.0 / sourceLb, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addLeqConstraint( terms, 1 ); - - /* - 2 - y >= ----- x - 1 - u - */ - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( - GurobiWrapper::Term( -2.0 / sourceUb, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, -1 ); - } - } -} - - -void LPFormulator::addMaxLayerToLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables ) -{ - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( layer->neuronEliminated( i ) ) - continue; - - unsigned targetVariable = layer->neuronToVariable( i ); - gurobi.addVariable( - Stringf( "x%u", targetVariable ), layer->getLb( i ), layer->getUb( i ) ); - - List sources = layer->getActivationSources( i ); - - bool haveFixedSourceValue = false; - double maxFixedSourceValue = FloatUtils::negativeInfinity(); - - double maxConcreteUb = FloatUtils::negativeInfinity(); - - List terms; - - for ( const auto &source : sources ) - { - const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); - unsigned sourceNeuron = source._neuron; - - if ( sourceLayer->neuronEliminated( sourceNeuron ) ) - { - haveFixedSourceValue = true; - double value = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - if ( value > maxFixedSourceValue ) - maxFixedSourceValue = value; - continue; - } - - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - String sourceName = Stringf( "x%u", sourceVariable ); - if ( createVariables && !gurobi.containsVariable( sourceName ) ) - gurobi.addVariable( sourceName, sourceLb, sourceUb ); - - // Target is at least source: target - source >= 0 - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, 0 ); - - // Find maximal concrete upper bound - if ( sourceUb > maxConcreteUb ) - maxConcreteUb = sourceUb; - } - - if ( haveFixedSourceValue && ( maxConcreteUb < maxFixedSourceValue ) ) - { - // At least one of the sources has a fixed value, - // and this fixed value dominates other sources.
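// Editorial worked example: if an eliminated source is fixed at 5 while
// every remaining source has a concrete upper bound of at most 3, then
// maxConcreteUb (3) < maxFixedSourceValue (5), and the equality constraint
// below pins the target variable to 5.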
- terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - gurobi.addEqConstraint( terms, maxFixedSourceValue ); - } - else - { - // If we have a fixed value, it's a lower bound - if ( haveFixedSourceValue ) - { - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - gurobi.addGeqConstraint( terms, maxFixedSourceValue ); - } - - // Target must be smaller than greatest concrete upper bound - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - gurobi.addLeqConstraint( terms, maxConcreteUb ); - } - } -} - -void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables ) -{ - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( layer->neuronEliminated( i ) ) - continue; - - Set handledInputNeurons; - List sources = layer->getActivationSources( i ); - - Vector sourceLbs; - Vector sourceUbs; - Vector sourceMids; - Vector targetLbs; - Vector targetUbs; - for ( const auto &source : sources ) - { - const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); - unsigned sourceNeuron = source._neuron; - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - String sourceName = Stringf( "x%u", sourceVariable ); - if ( createVariables && !gurobi.containsVariable( sourceName ) ) - gurobi.addVariable( sourceName, sourceLb, sourceUb ); - - sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); - sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); - sourceMids.append( ( sourceLb + sourceUb ) / 2 ); - targetLbs.append( layer->getLb( i ) ); - targetUbs.append( layer->getUb( i ) ); - } - - // Find the index of i in the softmax - unsigned index = 0; - for ( const auto &source : sources ) - { - if ( handledInputNeurons.exists( source._neuron ) ) - ++index; - else - { - handledInputNeurons.insert( source._neuron ); - break; - } - } - - double ub = - std::min( Layer::linearUpperBound( sourceLbs, sourceUbs, index ), layer->getUb( i ) ); - double lb = - std::max( Layer::linearLowerBound( sourceLbs, sourceUbs, index ), layer->getLb( i ) ); - targetLbs[index] = lb; - targetUbs[index] = ub; - - unsigned targetVariable = layer->neuronToVariable( i ); - gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); - - double bias; - SoftmaxBoundType boundType = Options::get()->getSoftmaxBoundType(); - List terms; - if ( FloatUtils::areEqual( lb, ub ) ) - { - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - gurobi.addEqConstraint( terms, ub ); - } - else - { - // Compute symbolic bound - if ( boundType == SoftmaxBoundType::LOG_SUM_EXP_DECOMPOSITION ) - { - bool useLSE2 = false; - for ( const auto &lb : targetLbs ) - { - if ( lb > GlobalConfiguration::SOFTMAX_LSE2_THRESHOLD ) - useLSE2 = true; - } - unsigned inputIndex = 0; - if ( !useLSE2 ) - { - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = Layer::LSELowerBound( sourceMids, sourceLbs, sourceUbs, index ); - for ( const auto &source : sources ) - { - const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); - unsigned sourceNeuron = source._neuron; - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dldj = Layer::dLSELowerBound( - sourceMids, 
sourceLbs, sourceUbs, index, inputIndex ); - terms.append( - GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); - bias -= dldj * sourceMids[inputIndex]; - ++inputIndex; - } - gurobi.addGeqConstraint( terms, bias ); - } - else - { - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, index ); - for ( const auto &source : sources ) - { - const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); - unsigned sourceNeuron = source._neuron; - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dldj = Layer::dLSELowerBound2( - sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - terms.append( - GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); - bias -= dldj * sourceMids[inputIndex]; - ++inputIndex; - } - gurobi.addGeqConstraint( terms, bias ); - } - - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, index ); - inputIndex = 0; - for ( const auto &source : sources ) - { - const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); - unsigned sourceNeuron = source._neuron; - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dudj = Layer::dLSEUpperbound( - sourceMids, targetLbs, targetUbs, index, inputIndex ); - terms.append( GurobiWrapper::Term( -dudj, Stringf( "x%u", sourceVariable ) ) ); - bias -= dudj * sourceMids[inputIndex]; - ++inputIndex; - } - gurobi.addLeqConstraint( terms, bias ); - } - else if ( boundType == SoftmaxBoundType::EXPONENTIAL_RECIPROCAL_DECOMPOSITION ) - { - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = Layer::ERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); - unsigned inputIndex = 0; - for ( const auto &source : sources ) - { - const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); - unsigned sourceNeuron = source._neuron; - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dldj = - Layer::dERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); - bias -= dldj * sourceMids[inputIndex]; - ++inputIndex; - } - gurobi.addGeqConstraint( terms, bias ); - - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = Layer::ERUpperBound( sourceMids, targetLbs, targetUbs, index ); - inputIndex = 0; - for ( const auto &source : sources ) - { - const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); - unsigned sourceNeuron = source._neuron; - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dudj = - Layer::dERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex ); - terms.append( GurobiWrapper::Term( -dudj, Stringf( "x%u", sourceVariable ) ) ); - bias -= dudj * sourceMids[inputIndex]; - ++inputIndex; - } - gurobi.addLeqConstraint( terms, bias ); - } - } - } -} - -void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables ) -{ - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( !layer->neuronEliminated( i ) ) - { - unsigned targetVariable = layer->neuronToVariable( i ); - - List sources = layer->getActivationSources( i ); - - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); 
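Before the bilinear constraints continue below, a note on the softmax bounds just constructed: LSELowerBound, dLSELowerBound and their ER counterparts linearize a convex or concave bounding function around the box midpoint m, so each constraint has the tangent-plane form y >= f(m) + sum_j f'_j(m) * (x_j - m_j); that is exactly why each loop subtracts dldj * sourceMids[inputIndex] from the bias. A minimal sketch of the same pattern for a generic differentiable bound f (illustrative names; value and grad stand in for the Layer::LSE*/dLSE* family):

    #include <vector>

    // Tangent-plane linearization of f at midpoint m: the constraint becomes
    //   y >= sum_j coeffs[j] * x_j + bias,  with  bias = f(m) - sum_j f'_j(m) * m_j,
    // matching the bias adjustments performed in the loops above.
    struct LinearBound
    {
        std::vector<double> coeffs;
        double bias;
    };

    LinearBound linearizeAtMidpoint( const std::vector<double> &mids,
                                     double value,                     // f(m)
                                     const std::vector<double> &grad ) // f'(m)
    {
        LinearBound result{ grad, value };
        for ( size_t j = 0; j < mids.size(); ++j )
            result.bias -= grad[j] * mids[j]; // move f'(m)·m to the constant side
        return result;
    }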
- - Vector<double> sourceLbs; - Vector<double> sourceUbs; - Vector<double> sourceValues; - Vector<unsigned> sourceNeurons; - bool allConstant = true; - for ( const auto &sourceIndex : sources ) - { - unsigned sourceNeuron = sourceIndex._neuron; - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - String sourceName = Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeuron ) ); - - sourceNeurons.append( sourceNeuron ); - sourceLbs.append( sourceLb ); - sourceUbs.append( sourceUb ); - - if ( createVariables && !gurobi.containsVariable( sourceName ) ) - gurobi.addVariable( sourceName, sourceLb, sourceUb ); - - if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) - { - allConstant = false; - } - else - { - double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - sourceValues.append( sourceValue ); - } - } - - if ( allConstant ) - { - // If both source neurons have been eliminated, this neuron is constant - double targetValue = sourceValues[0] * sourceValues[1]; - gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); - continue; - } - - double lb = FloatUtils::infinity(); - double ub = FloatUtils::negativeInfinity(); - List<double> values = { sourceLbs[0] * sourceLbs[1], - sourceLbs[0] * sourceUbs[1], - sourceUbs[0] * sourceLbs[1], - sourceUbs[0] * sourceUbs[1] }; - for ( const auto &v : values ) - { - if ( v < lb ) - lb = v; - if ( v > ub ) - ub = v; - } - - gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); - - // Lower bound: out >= l_y * x + l_x * y - l_x * l_y - List<GurobiWrapper::Term> terms; - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( - -sourceLbs[1], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); - terms.append( GurobiWrapper::Term( - -sourceLbs[0], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); - gurobi.addGeqConstraint( terms, -sourceLbs[0] * sourceLbs[1] ); - - // Upper bound: out <= u_y * x + l_x * y - l_x * u_y - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( - -sourceUbs[1], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); - terms.append( GurobiWrapper::Term( - -sourceLbs[0], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); - gurobi.addLeqConstraint( terms, -sourceLbs[0] * sourceUbs[1] ); - } - } -} - -void LPFormulator::addWeightedSumLayerToLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables ) -{ - if ( createVariables ) - { - for ( const auto &sourceLayerPair : layer->getSourceLayers() ) - { - const Layer *sourceLayer = _layerOwner->getLayer( sourceLayerPair.first ); - unsigned sourceLayerSize = sourceLayerPair.second; - - for ( unsigned j = 0; j < sourceLayerSize; ++j ) - { - if ( !sourceLayer->neuronEliminated( j ) ) - { - Stringf sourceVariableName( "x%u", sourceLayer->neuronToVariable( j ) ); - if ( !gurobi.containsVariable( sourceVariableName ) ) - { - gurobi.addVariable( - sourceVariableName, sourceLayer->getLb( j ), sourceLayer->getUb( j ) ); - } - } - } - } - } - - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( !layer->neuronEliminated( i ) ) - { - unsigned variable = layer->neuronToVariable( i ); - - gurobi.addVariable( Stringf( "x%u", variable ), layer->getLb( i ), layer->getUb( i ) ); - - List<GurobiWrapper::Term> terms; - terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", variable ) ) ); - - double
bias = -layer->getBias( i ); - - for ( const auto &sourceLayerPair : layer->getSourceLayers() ) - { - const Layer *sourceLayer = _layerOwner->getLayer( sourceLayerPair.first ); - unsigned sourceLayerSize = sourceLayerPair.second; - - for ( unsigned j = 0; j < sourceLayerSize; ++j ) - { - double weight = layer->getWeight( sourceLayerPair.first, j, i ); - if ( !sourceLayer->neuronEliminated( j ) ) - { - Stringf sourceVariableName( "x%u", sourceLayer->neuronToVariable( j ) ); - terms.append( GurobiWrapper::Term( weight, sourceVariableName ) ); - } - else - { - bias -= weight * sourceLayer->getEliminatedNeuronValue( j ); - } - } - } - - gurobi.addEqConstraint( terms, bias ); - } - } -} - -void LPFormulator::addLeakyReluLayerToLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables ) -{ - double slope = layer->getAlpha(); - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( !layer->neuronEliminated( i ) ) - { - unsigned targetVariable = layer->neuronToVariable( i ); - - List<NeuronIndex> sources = layer->getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - unsigned sourceNeuron = sources.begin()->_neuron; - - if ( sourceLayer->neuronEliminated( sourceNeuron ) ) - { - // If the source neuron has been eliminated, this neuron is constant - double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - double targetValue = sourceValue > 0 ? sourceValue : slope * sourceValue; - - gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); - - continue; - } - - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - - String sourceName = Stringf( "x%u", sourceVariable ); - if ( createVariables && !gurobi.containsVariable( sourceName ) ) - gurobi.addVariable( sourceName, sourceLb, sourceUb ); - - gurobi.addVariable( - Stringf( "x%u", targetVariable ), layer->getLb( i ), layer->getUb( i ) ); - - if ( !FloatUtils::isNegative( sourceLb ) ) - { - // The LeakyReLU is active, y = x - - List<GurobiWrapper::Term> terms; - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addEqConstraint( terms, 0 ); - } - else if ( !FloatUtils::isPositive( sourceUb ) ) - { - // The LeakyReLU is inactive, y = alpha * x - List<GurobiWrapper::Term> terms; - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -slope, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addEqConstraint( terms, 0 ); - } - else - { - double width = sourceUb - sourceLb; - double weight = ( sourceUb - slope * sourceLb ) / width; - double bias = ( ( slope - 1 ) * sourceUb * sourceLb ) / width; - - /* - The phase of this LeakyReLU is not yet fixed. - For y = LeakyReLU(x), we add the following triangular relaxation: - 1. y >= alpha * x - 2. y >= x - 3. y is below the line that crosses (x.lb, alpha * x.lb) and (x.ub, x.ub) - */ - - // y >= alpha * x - List<GurobiWrapper::Term> terms; - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -slope, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, 0 ); - - // y >= x, i.e.
y - x >= 0 - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, 0 ); - - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -weight, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addLeqConstraint( terms, bias ); - } - } - } -} - -void LPFormulator::addLayerToParameterisedModel( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables, - const Vector<double> &coeffs ) -{ - switch ( layer->getLayerType() ) - { - case Layer::RELU: - addReluLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeffs ); - break; - - case Layer::LEAKY_RELU: - addLeakyReluLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeffs ); - break; - - case Layer::SIGN: - addSignLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeffs ); - break; - - case Layer::BILINEAR: - addBilinearLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeffs ); - break; - - default: - addLayerToModel( gurobi, layer, createVariables ); - break; - } -} - -void LPFormulator::addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables, - const Vector<double> &coeffs ) -{ - double coeff = coeffs[0]; - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( !layer->neuronEliminated( i ) ) - { - unsigned targetVariable = layer->neuronToVariable( i ); - - List<NeuronIndex> sources = layer->getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - unsigned sourceNeuron = sources.begin()->_neuron; - - if ( sourceLayer->neuronEliminated( sourceNeuron ) ) - { - // If the source neuron has been eliminated, this neuron is constant - double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - double targetValue = sourceValue > 0 ? sourceValue : 0; - - gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); - - continue; - } - - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - String sourceName = Stringf( "x%u", sourceVariable ); - if ( createVariables && !gurobi.containsVariable( sourceName ) ) - gurobi.addVariable( sourceName, sourceLb, sourceUb ); - - gurobi.addVariable( Stringf( "x%u", targetVariable ), 0, layer->getUb( i ) ); - - if ( !FloatUtils::isNegative( sourceLb ) ) - { - // The ReLU is active, y = x - if ( sourceLb < 0 ) - sourceLb = 0; - - List<GurobiWrapper::Term> terms; - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addEqConstraint( terms, 0 ); - } - else if ( !FloatUtils::isPositive( sourceUb ) ) - { - // The ReLU is inactive, y = 0 - List<GurobiWrapper::Term> terms; - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - gurobi.addEqConstraint( terms, 0 ); - } - else - { - /* - The phase of this ReLU is not yet fixed. - - For y = ReLU(x), we add the following relaxation: - - 1. y >= 0 - 2. y >= x - 3. y >= coeff * x - 4. y is below the line that crosses (x.lb,0) and (x.ub,x.ub) - */ - - // y >= 0 - List<GurobiWrapper::Term> terms; - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - gurobi.addGeqConstraint( terms, 0 ); - - // y >= x, i.e. y - x >= 0.
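To make the parameterisation concrete before the remaining constraints are assembled below: with coeff in [0,1], the extra facet y >= coeff * x sweeps continuously between the two fixed lower facets y >= 0 and y >= x, while the single upper facet is always the chord through (l,0) and (u,u); for example, l = -1 and u = 2 give y <= (2/3)x + 2/3. A small sketch of the chord computation (plain C++; reluUpperChord is a hypothetical helper, not LPFormulator API):

    #include <cassert>

    // Chord (upper facet) of the ReLU triangle relaxation over [l, u], l < 0 < u:
    // y <= slope * x + intercept, passing through (l, 0) and (u, u).
    void reluUpperChord( double l, double u, double &slope, double &intercept )
    {
        assert( l < 0 && u > 0 );
        slope = u / ( u - l );
        intercept = -u * l / ( u - l ); // e.g. l = -1, u = 2 gives y <= (2/3)x + 2/3
    }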
- terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, 0 ); - - // y >= coeff * x, i.e. y - coeff * x >= 0 (varies continuously between y >= 0 and - // y >= x). - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -coeff, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, 0 ); - - /* - u ul - y <= ----- x - ----- - u - l u - l - */ - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -sourceUb / ( sourceUb - sourceLb ), - Stringf( "x%u", sourceVariable ) ) ); - gurobi.addLeqConstraint( terms, - ( -sourceUb * sourceLb ) / ( sourceUb - sourceLb ) ); - } - } - } -} - -void LPFormulator::addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables, - const Vector<double> &coeffs ) -{ - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( layer->neuronEliminated( i ) ) - continue; - - unsigned targetVariable = layer->neuronToVariable( i ); - - List<NeuronIndex> sources = layer->getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - unsigned sourceNeuron = sources.begin()->_neuron; - - if ( sourceLayer->neuronEliminated( sourceNeuron ) ) - { - // If the source neuron has been eliminated, this neuron is constant - double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - double targetValue = FloatUtils::isNegative( sourceValue ) ? -1 : 1; - - gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); - - continue; - } - - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - String sourceName = Stringf( "x%u", sourceVariable ); - if ( createVariables && !gurobi.containsVariable( sourceName ) ) - gurobi.addVariable( sourceName, sourceLb, sourceUb ); - - if ( !FloatUtils::isNegative( sourceLb ) ) - { - // The Sign is positive, y = 1 - gurobi.addVariable( Stringf( "x%u", targetVariable ), 1, 1 ); - } - else if ( FloatUtils::isNegative( sourceUb ) ) - { - // The Sign is negative, y = -1 - gurobi.addVariable( Stringf( "x%u", targetVariable ), -1, -1 ); - } - else - { - /* - The phase of this Sign is not yet fixed. - - For y = Sign(x), we add the following parallelogram relaxation: - - 1. y >= -1 - 2. y <= 1 - 3. y is below the line that crosses (x.lb,-1) and (0,1) - 4. y is above the line that crosses (0,-1) and (x.ub,1) - */ - - // -1 <= y <= 1 - gurobi.addVariable( Stringf( "x%u", targetVariable ), -1, 1 ); - - /* - 2 - y <= ----- * coeffs[0] * x + 1 - - l - Varies continuously between y <= 1 and y <= -2/l * x + 1. - */ - List<GurobiWrapper::Term> terms; - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( 2.0 / sourceLb * coeffs[0], - Stringf( "x%u", sourceVariable ) ) ); - gurobi.addLeqConstraint( terms, 1 ); - - /* - 2 - y >= ----- * coeffs[1] * x - 1 - u - Varies continuously between y >= -1 and y >= 2/u * x - 1.
- */ - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -2.0 / sourceUb * coeffs[1], - Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, -1 ); - } - } -} - -void LPFormulator::addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables, - const Vector<double> &coeffs ) -{ - double slope = layer->getAlpha(); - double coeff = coeffs[0]; - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( !layer->neuronEliminated( i ) ) - { - unsigned targetVariable = layer->neuronToVariable( i ); - - List<NeuronIndex> sources = layer->getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - unsigned sourceNeuron = sources.begin()->_neuron; - - if ( sourceLayer->neuronEliminated( sourceNeuron ) ) - { - // If the source neuron has been eliminated, this neuron is constant - double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - double targetValue = sourceValue > 0 ? sourceValue : slope * sourceValue; - - gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); - - continue; - } - - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - - String sourceName = Stringf( "x%u", sourceVariable ); - if ( createVariables && !gurobi.containsVariable( sourceName ) ) - gurobi.addVariable( sourceName, sourceLb, sourceUb ); - - gurobi.addVariable( - Stringf( "x%u", targetVariable ), layer->getLb( i ), layer->getUb( i ) ); - - if ( !FloatUtils::isNegative( sourceLb ) ) - { - // The LeakyReLU is active, y = x - - List<GurobiWrapper::Term> terms; - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addEqConstraint( terms, 0 ); - } - else if ( !FloatUtils::isPositive( sourceUb ) ) - { - // The LeakyReLU is inactive, y = alpha * x - List<GurobiWrapper::Term> terms; - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -slope, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addEqConstraint( terms, 0 ); - } - else - { - double width = sourceUb - sourceLb; - double weight = ( sourceUb - slope * sourceLb ) / width; - double bias = ( ( slope - 1 ) * sourceUb * sourceLb ) / width; - - /* - The phase of this LeakyReLU is not yet fixed. - For y = LeakyReLU(x), we add the following relaxation: - 1. y >= ((1 - alpha) * coeff + alpha) * x (varies continuously between y >= - alpha * x and y >= x). - 2. y >= x - 3. y >= alpha * x - 4. y is below the line that crosses (x.lb, alpha * x.lb) and (x.ub, x.ub) - */ - - // y >= ((1 - alpha) * coeff + alpha) * x - List<GurobiWrapper::Term> terms; - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -slope - ( 1 - slope ) * coeff, - Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, 0 ); - - // y >= x, i.e. y - x >= 0 - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, 0 ); - - // y >= alpha * x, i.e.
y - alpha * x >= 0 - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -slope, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, 0 ); - - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -weight, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addLeqConstraint( terms, bias ); - } - } - } -} - -void LPFormulator::addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables, - const Vector<double> &coeffs ) -{ - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( !layer->neuronEliminated( i ) ) - { - unsigned targetVariable = layer->neuronToVariable( i ); - - List<NeuronIndex> sources = layer->getActivationSources( i ); - - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - - Vector<double> sourceLbs; - Vector<double> sourceUbs; - Vector<double> sourceValues; - Vector<unsigned> sourceNeurons; - bool allConstant = true; - for ( const auto &sourceIndex : sources ) - { - unsigned sourceNeuron = sourceIndex._neuron; - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - String sourceName = Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeuron ) ); - - sourceNeurons.append( sourceNeuron ); - sourceLbs.append( sourceLb ); - sourceUbs.append( sourceUb ); - - if ( createVariables && !gurobi.containsVariable( sourceName ) ) - gurobi.addVariable( sourceName, sourceLb, sourceUb ); - - if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) - { - allConstant = false; - } - else - { - double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - sourceValues.append( sourceValue ); - } - } - - if ( allConstant ) - { - // If both source neurons have been eliminated, this neuron is constant - double targetValue = sourceValues[0] * sourceValues[1]; - gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); - continue; - } - - double lb = FloatUtils::infinity(); - double ub = FloatUtils::negativeInfinity(); - List<double> values = { sourceLbs[0] * sourceLbs[1], - sourceLbs[0] * sourceUbs[1], - sourceUbs[0] * sourceLbs[1], - sourceUbs[0] * sourceUbs[1] }; - for ( const auto &v : values ) - { - if ( v < lb ) - lb = v; - if ( v > ub ) - ub = v; - } - - gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); - - // Bilinear linear relaxation (arXiv:2405.21063v2 [cs.LG]) - // Lower bound: out >= a_l * x + b_l * y + c_l, where - // a_l = alpha1 * l_y + ( 1 - alpha1 ) * u_y - // b_l = alpha1 * l_x + ( 1 - alpha1 ) * u_x - // c_l = -alpha1 * l_x * l_y - ( 1 - alpha1 ) * u_x * u_y - - // Upper bound: out <= a_u * x + b_u * y + c_u, where - // a_u = alpha2 * u_y + ( 1 - alpha2 ) * l_y - // b_u = alpha2 * l_x + ( 1 - alpha2 ) * u_x - // c_u = -alpha2 * l_x * u_y - ( 1 - alpha2 ) * u_x * l_y - - List<GurobiWrapper::Term> terms; - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( - -coeffs[0] * sourceLbs[1] - ( 1 - coeffs[0] ) * sourceUbs[1], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); - terms.append( GurobiWrapper::Term( - -coeffs[0] * sourceLbs[0] - ( 1 - coeffs[0] ) * sourceUbs[0], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); - gurobi.addGeqConstraint( terms, - -coeffs[0] * sourceLbs[0] * sourceLbs[1] - - ( 1 - coeffs[0] ) * sourceUbs[0] * sourceUbs[1] ); - - terms.clear(); -
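While the upper-bound constraint is being assembled across this span, the parameterised facets are worth restating: with alpha in [0,1], they interpolate between the two classical McCormick planes for out = x * y. A compact sketch of the coefficient computation (plain C++; Plane and bilinearLowerFacet are illustrative names only):

    // Parameterised McCormick lower facet for out = x * y on [lx,ux] x [ly,uy]:
    //   out >= a * x + b * y + c,  with alpha in [0, 1] blending the two exact
    //   planes  out >= ly*x + lx*y - lx*ly  and  out >= uy*x + ux*y - ux*uy.
    struct Plane
    {
        double a, b, c;
    };

    Plane bilinearLowerFacet( double lx, double ux, double ly, double uy, double alpha )
    {
        Plane p;
        p.a = alpha * ly + ( 1 - alpha ) * uy;
        p.b = alpha * lx + ( 1 - alpha ) * ux;
        p.c = -alpha * lx * ly - ( 1 - alpha ) * ux * uy;
        return p; // alpha = 1 and alpha = 0 recover the two McCormick planes
    }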
terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( - -coeffs[1] * sourceUbs[1] - ( 1 - coeffs[1] ) * sourceLbs[1], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); - terms.append( GurobiWrapper::Term( - -coeffs[1] * sourceLbs[0] - ( 1 - coeffs[1] ) * sourceUbs[0], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); - gurobi.addLeqConstraint( terms, - -coeffs[1] * sourceLbs[0] * sourceUbs[1] - - ( 1 - coeffs[1] ) * sourceUbs[0] * sourceLbs[1] ); - } - } -} - -void LPFormulator::addPolyognalTighteningsToLpRelaxation( - GurobiWrapper &gurobi, - const Map &layers, - unsigned firstLayer, - unsigned lastLayer, - const Vector &polygonalTightenings ) -{ - List terms; - for ( const auto &tightening : polygonalTightenings ) - { - Map neuronToCoefficient = tightening._neuronToCoefficient; - PolygonalTightening::PolygonalBoundType type = tightening._type; - double value = tightening._value; - - bool outOfBounds = false; - for ( const auto &pair : neuronToCoefficient ) - { - unsigned currentLayerIndex = pair.first._layer; - if ( currentLayerIndex < firstLayer || currentLayerIndex > lastLayer ) - { - outOfBounds = true; - } - } - if ( outOfBounds ) - { - continue; - } - - terms.clear(); - for ( const auto &pair : neuronToCoefficient ) - { - unsigned currentLayerIndex = pair.first._layer; - unsigned i = pair.first._neuron; - double coeff = pair.second; - Layer *layer = layers[currentLayerIndex]; - - if ( !layer->neuronEliminated( i ) ) - { - unsigned variable = layer->neuronToVariable( i ); - String variableName = Stringf( "x%u", variable ); - if ( !gurobi.containsVariable( variableName ) ) - { - gurobi.addVariable( variableName, layer->getLb( i ), layer->getUb( i ) ); - } - terms.append( GurobiWrapper::Term( coeff, Stringf( "x%u", variable ) ) ); - } - else - { - value -= coeff * layer->getEliminatedNeuronValue( i ); - } - } - - if ( type == PolygonalTightening::UB ) - { - gurobi.addLeqConstraint( terms, value ); - } - else - { - gurobi.addGeqConstraint( terms, value ); - } - } -} - -void LPFormulator::setCutoff( double cutoff ) -{ - _cutoffInUse = true; - _cutoffValue = cutoff; -} - -} // namespace NLR From c55289791b5a161554530459bbd3d630e14706f1 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Wed, 17 Sep 2025 21:12:24 +0300 Subject: [PATCH 76/81] Delete src/Layer.cpp --- src/Layer.cpp | 5162 ------------------------------------------------- 1 file changed, 5162 deletions(-) delete mode 100644 src/Layer.cpp diff --git a/src/Layer.cpp b/src/Layer.cpp deleted file mode 100644 index 43f24e341b..0000000000 --- a/src/Layer.cpp +++ /dev/null @@ -1,5162 +0,0 @@ -/********************* */ -/*! \file Layer.cpp - ** \verbatim - ** Top contributors (to current version): - ** Guy Katz, Ido Shmuel - ** This file is part of the Marabou project. - ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS - ** in the top-level source directory) and their institutional affiliations. - ** All rights reserved. 
See the file COPYING in the top-level source - ** directory for licensing information.\endverbatim - ** - ** [[ Add lengthier description here ]] - - **/ - -#include "Layer.h" - -#include "Options.h" -#include "Query.h" -#include "SoftmaxConstraint.h" -#include "SymbolicBoundTighteningType.h" - -namespace NLR { - -Layer::~Layer() -{ - freeMemoryIfNeeded(); -} - -void Layer::setLayerOwner( LayerOwner *layerOwner ) -{ - _layerOwner = layerOwner; -} - -Layer::Layer( unsigned index, Type type, unsigned size, LayerOwner *layerOwner ) - : _layerIndex( index ) - , _type( type ) - , _size( size ) - , _layerOwner( layerOwner ) - , _bias( NULL ) - , _assignment( NULL ) - , _lb( NULL ) - , _ub( NULL ) - , _inputLayerSize( 0 ) - , _symbolicLb( NULL ) - , _symbolicUb( NULL ) - , _symbolicLowerBias( NULL ) - , _symbolicUpperBias( NULL ) - , _symbolicLbOfLb( NULL ) - , _symbolicUbOfLb( NULL ) - , _symbolicLbOfUb( NULL ) - , _symbolicUbOfUb( NULL ) -{ - allocateMemory(); -} - -void Layer::allocateMemory() -{ - if ( _type == WEIGHTED_SUM ) - { - _bias = new double[_size]; - std::fill_n( _bias, _size, 0 ); - } - - _lb = new double[_size]; - _ub = new double[_size]; - - std::fill_n( _lb, _size, 0 ); - std::fill_n( _ub, _size, 0 ); - - _assignment = new double[_size]; - - _simulations.assign( - _size, Vector( Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ) ) ); - - _inputLayerSize = ( _type == INPUT ) ? _size : _layerOwner->getLayer( 0 )->getSize(); - if ( Options::get()->getSymbolicBoundTighteningType() == - SymbolicBoundTighteningType::SYMBOLIC_BOUND_TIGHTENING || - Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX || - Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP || - Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM || - Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT || - Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS ) - { - _symbolicLb = new double[_size * _inputLayerSize]; - _symbolicUb = new double[_size * _inputLayerSize]; - - std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); - std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); - - _symbolicLowerBias = new double[_size]; - _symbolicUpperBias = new double[_size]; - - std::fill_n( _symbolicLowerBias, _size, 0 ); - std::fill_n( _symbolicUpperBias, _size, 0 ); - - _symbolicLbOfLb = new double[_size]; - _symbolicUbOfLb = new double[_size]; - _symbolicLbOfUb = new double[_size]; - _symbolicUbOfUb = new double[_size]; - - std::fill_n( _symbolicLbOfLb, _size, 0 ); - std::fill_n( _symbolicUbOfLb, _size, 0 ); - std::fill_n( _symbolicLbOfUb, _size, 0 ); - std::fill_n( _symbolicUbOfUb, _size, 0 ); - } -} - -void Layer::setAssignment( const double *values ) -{ - ASSERT( _eliminatedNeurons.empty() ); - memcpy( _assignment, values, _size * sizeof( double ) ); -} - -const double *Layer::getAssignment() const -{ - return _assignment; -} - -double Layer::getAssignment( unsigned neuron ) const -{ - return _assignment[neuron]; -} - -void Layer::setSimulations( const Vector> *values ) -{ - _simulations = *values; -} - -const Vector> *Layer::getSimulations() const -{ - return &_simulations; -} - -void Layer::computeAssignment() -{ - ASSERT( _type != INPUT ); - - if ( _type == WEIGHTED_SUM ) - { - // Initialize to 
bias - memcpy( _assignment, _bias, sizeof( double ) * _size ); - - // Process each of the source layers - for ( auto &sourceLayerEntry : _sourceLayers ) - { - const Layer *sourceLayer = _layerOwner->getLayer( sourceLayerEntry.first ); - const double *sourceAssignment = sourceLayer->getAssignment(); - unsigned sourceSize = sourceLayerEntry.second; - const double *weights = _layerToWeights[sourceLayerEntry.first]; - - for ( unsigned i = 0; i < sourceSize; ++i ) - for ( unsigned j = 0; j < _size; ++j ) - _assignment[j] += ( sourceAssignment[i] * weights[i * _size + j] ); - } - } - - else if ( _type == RELU ) - { - for ( unsigned i = 0; i < _size; ++i ) - { - NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); - double inputValue = - _layerOwner->getLayer( sourceIndex._layer )->getAssignment( sourceIndex._neuron ); - - _assignment[i] = FloatUtils::max( inputValue, 0 ); - } - } - - else if ( _type == ROUND ) - { - for ( unsigned i = 0; i < _size; ++i ) - { - NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); - double inputValue = - _layerOwner->getLayer( sourceIndex._layer )->getAssignment( sourceIndex._neuron ); - - _assignment[i] = FloatUtils::round( inputValue ); - } - } - - else if ( _type == LEAKY_RELU ) - { - for ( unsigned i = 0; i < _size; ++i ) - { - NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); - double inputValue = - _layerOwner->getLayer( sourceIndex._layer )->getAssignment( sourceIndex._neuron ); - ASSERT( _alpha > 0 && _alpha < 1 ); - _assignment[i] = FloatUtils::max( inputValue, _alpha * inputValue ); - } - } - - else if ( _type == ABSOLUTE_VALUE ) - { - for ( unsigned i = 0; i < _size; ++i ) - { - NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); - double inputValue = - _layerOwner->getLayer( sourceIndex._layer )->getAssignment( sourceIndex._neuron ); - - _assignment[i] = FloatUtils::abs( inputValue ); - } - } - - else if ( _type == MAX ) - { - for ( unsigned i = 0; i < _size; ++i ) - { - _assignment[i] = FloatUtils::negativeInfinity(); - - for ( const auto &input : _neuronToActivationSources[i] ) - { - double value = - _layerOwner->getLayer( input._layer )->getAssignment( input._neuron ); - if ( value > _assignment[i] ) - _assignment[i] = value; - } - } - } - - else if ( _type == SIGN ) - { - for ( unsigned i = 0; i < _size; ++i ) - { - NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); - double inputValue = - _layerOwner->getLayer( sourceIndex._layer )->getAssignment( sourceIndex._neuron ); - - _assignment[i] = FloatUtils::isNegative( inputValue ) ? 
-1 : 1; - } - } - - else if ( _type == SIGMOID ) - { - for ( unsigned i = 0; i < _size; ++i ) - { - NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); - double inputValue = - _layerOwner->getLayer( sourceIndex._layer )->getAssignment( sourceIndex._neuron ); - - _assignment[i] = 1 / ( 1 + std::exp( -inputValue ) ); - } - } - - else if ( _type == SOFTMAX ) - { - for ( unsigned i = 0; i < _size; ++i ) - { - _assignment[i] = FloatUtils::negativeInfinity(); - - Vector inputs; - Vector outputs; - unsigned outputIndex = 0; - unsigned index = 0; - for ( const auto &input : _neuronToActivationSources[i] ) - { - if ( input._neuron == i ) - outputIndex = index; - double value = - _layerOwner->getLayer( input._layer )->getAssignment( input._neuron ); - inputs.append( value ); - ++index; - } - - SoftmaxConstraint::softmax( inputs, outputs ); - _assignment[i] = outputs[outputIndex]; - } - } - - else if ( _type == BILINEAR ) - { - for ( unsigned i = 0; i < _size; ++i ) - { - _assignment[i] = 1; - for ( const auto &input : _neuronToActivationSources[i] ) - { - double value = - _layerOwner->getLayer( input._layer )->getAssignment( input._neuron ); - _assignment[i] *= value; - } - } - } - else - { - printf( "Error! Neuron type %u unsupported\n", _type ); - throw MarabouError( MarabouError::NETWORK_LEVEL_REASONER_ACTIVATION_NOT_SUPPORTED ); - } - - // Eliminated variables supersede anything else - no matter what - // was computed due to left-over weights, etc, their set values - // prevail. - for ( const auto &eliminated : _eliminatedNeurons ) - _assignment[eliminated.first] = eliminated.second; -} - -void Layer::computeSimulations() -{ - ASSERT( _type != INPUT ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - if ( _type == WEIGHTED_SUM ) - { - // Process each of the source layers - for ( auto &sourceLayerEntry : _sourceLayers ) - { - const Layer *sourceLayer = _layerOwner->getLayer( sourceLayerEntry.first ); - const Vector> *sourceSimulations = sourceLayer->getSimulations(); - - unsigned sourceSize = sourceLayerEntry.second; - const double *weights = _layerToWeights[sourceLayerEntry.first]; - - for ( unsigned i = 0; i < _size; ++i ) - { - for ( unsigned j = 0; j < simulationSize; ++j ) - _simulations[i][j] = _bias[i]; - } - - for ( unsigned i = 0; i < sourceSize; ++i ) - for ( unsigned j = 0; j < simulationSize; ++j ) - for ( unsigned k = 0; k < _size; ++k ) - _simulations[k][j] += - ( ( *sourceSimulations ).get( i ).get( j ) * weights[i * _size + k] ); - } - } - else if ( _type == RELU ) - { - for ( unsigned i = 0; i < _size; ++i ) - { - NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); - const Vector &simulations = - ( *( _layerOwner->getLayer( sourceIndex._layer )->getSimulations() ) ) - .get( sourceIndex._neuron ); - for ( unsigned j = 0; j < simulationSize; ++j ) - _simulations[i][j] = FloatUtils::max( simulations.get( j ), 0 ); - } - } - else if ( _type == LEAKY_RELU ) - { - for ( unsigned i = 0; i < _size; ++i ) - { - NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); - const Vector &simulations = - ( *( _layerOwner->getLayer( sourceIndex._layer )->getSimulations() ) ) - .get( sourceIndex._neuron ); - ASSERT( _alpha > 0 && _alpha < 1 ); - for ( unsigned j = 0; j < simulationSize; ++j ) - _simulations[i][j] = - FloatUtils::max( simulations.get( j ), _alpha * simulations.get( j ) ); - } - } - else if ( _type == ABSOLUTE_VALUE ) - { - for ( unsigned i = 0; i < _size; ++i ) - { - NeuronIndex sourceIndex = 
*_neuronToActivationSources[i].begin(); - const Vector &simulations = - ( *( _layerOwner->getLayer( sourceIndex._layer )->getSimulations() ) ) - .get( sourceIndex._neuron ); - for ( unsigned j = 0; j < simulationSize; ++j ) - _simulations[i][j] = FloatUtils::abs( simulations.get( j ) ); - } - } - else if ( _type == MAX ) - { - for ( unsigned i = 0; i < _size; ++i ) - { - for ( unsigned j = 0; j < simulationSize; ++j ) - { - _simulations[i][j] = FloatUtils::negativeInfinity(); - - for ( const auto &input : _neuronToActivationSources[i] ) - { - // double value = ( *( _layerOwner->getLayer( input._layer )->getSimulations() ) - // )[input._neuron][j]; - double value = ( *( _layerOwner->getLayer( input._layer )->getSimulations() ) ) - .get( input._neuron ) - .get( j ); - if ( value > _simulations[i][j] ) - _simulations[i][j] = value; - } - } - } - } - else if ( _type == SIGN ) - { - for ( unsigned i = 0; i < _size; ++i ) - { - NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); - const Vector &simulations = - ( *( _layerOwner->getLayer( sourceIndex._layer )->getSimulations() ) ).get( i ); - for ( unsigned j = 0; j < simulationSize; ++j ) - _simulations[i][j] = FloatUtils::isNegative( simulations.get( j ) ) ? -1 : 1; - } - } - else if ( _type == SIGMOID ) - { - for ( unsigned i = 0; i < _size; ++i ) - { - NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); - const Vector &simulations = - ( *( _layerOwner->getLayer( sourceIndex._layer )->getSimulations() ) ).get( i ); - for ( unsigned j = 0; j < simulationSize; ++j ) - _simulations[i][j] = 1 / ( 1 + std::exp( -simulations.get( j ) ) ); - } - } - else if ( _type == ROUND ) - { - for ( unsigned i = 0; i < _size; ++i ) - { - NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); - const Vector &simulations = - ( *( _layerOwner->getLayer( sourceIndex._layer )->getSimulations() ) ).get( i ); - for ( unsigned j = 0; j < simulationSize; ++j ) - _simulations[i][j] = FloatUtils::round( simulations.get( j ) ); - } - } - else if ( _type == SOFTMAX ) - { - for ( unsigned i = 0; i < _size; ++i ) - { - for ( unsigned j = 0; j < simulationSize; ++j ) - { - _simulations[i][j] = FloatUtils::negativeInfinity(); - - Vector inputs; - Vector outputs; - unsigned outputIndex = 0; - unsigned index = 0; - - for ( const auto &input : _neuronToActivationSources[i] ) - { - if ( input._neuron == i ) - outputIndex = index; - double value = ( *( _layerOwner->getLayer( input._layer )->getSimulations() ) ) - .get( input._neuron ) - .get( j ); - inputs.append( value ); - ++index; - } - - SoftmaxConstraint::softmax( inputs, outputs ); - _simulations[i][j] = outputs[outputIndex]; - } - } - } - else if ( _type == BILINEAR ) - { - for ( unsigned i = 0; i < _size; ++i ) - { - for ( unsigned j = 0; j < simulationSize; ++j ) - { - _simulations[i][j] = 1; - for ( const auto &input : _neuronToActivationSources[i] ) - { - double value = ( *( _layerOwner->getLayer( input._layer )->getSimulations() ) ) - .get( input._neuron ) - .get( j ); - _simulations[i][j] *= value; - } - } - } - } - else - { - printf( "Error! Neuron type %u unsupported\n", _type ); - throw MarabouError( MarabouError::NETWORK_LEVEL_REASONER_ACTIVATION_NOT_SUPPORTED ); - } - - // Eliminated variables supersede anything else - no matter what - // was computed due to left-over weights, etc, their set values - // prevail. 
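As a compressed illustration of what computeSimulations does in the common case, before the override loop below writes eliminated values over every sample: each target neuron evaluates its weighted sum per sample and then applies the activation samplewise. A self-contained sketch for a single source layer feeding a ReLU (matrix layout and names simplified relative to the member code above):

    #include <algorithm>
    #include <vector>

    // Propagate per-neuron sample vectors through one weighted-sum layer + ReLU.
    // weights is row-major: weights[i * outSize + k] maps source neuron i to target k.
    std::vector<std::vector<double>> simulateLayer( const std::vector<std::vector<double>> &src,
                                                    const std::vector<double> &weights,
                                                    const std::vector<double> &bias,
                                                    size_t outSize, size_t nSamples )
    {
        const size_t srcSize = src.size();
        std::vector<std::vector<double>> out( outSize, std::vector<double>( nSamples ) );
        for ( size_t k = 0; k < outSize; ++k )
            for ( size_t j = 0; j < nSamples; ++j )
            {
                double sum = bias[k];
                for ( size_t i = 0; i < srcSize; ++i )
                    sum += src[i][j] * weights[i * outSize + k];
                out[k][j] = std::max( sum, 0.0 ); // ReLU applied samplewise
            }
        return out;
    }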
- for ( const auto &eliminated : _eliminatedNeurons ) - { - for ( unsigned j = 0; j < simulationSize; ++j ) - _simulations[eliminated.first][j] = eliminated.second; - } -} - -void Layer::addSourceLayer( unsigned layerNumber, unsigned layerSize ) -{ - ASSERT( _type != INPUT ); - - if ( _sourceLayers.exists( layerNumber ) ) - return; - - _sourceLayers[layerNumber] = layerSize; - - if ( _type == WEIGHTED_SUM ) - { - _layerToWeights[layerNumber] = new double[layerSize * _size]; - _layerToPositiveWeights[layerNumber] = new double[layerSize * _size]; - _layerToNegativeWeights[layerNumber] = new double[layerSize * _size]; - - std::fill_n( _layerToWeights[layerNumber], layerSize * _size, 0 ); - std::fill_n( _layerToPositiveWeights[layerNumber], layerSize * _size, 0 ); - std::fill_n( _layerToNegativeWeights[layerNumber], layerSize * _size, 0 ); - } -} - -const Map &Layer::getSourceLayers() const -{ - return _sourceLayers; -} - -void Layer::addSuccessorLayer( unsigned layerNumber ) -{ - _successorLayers.insert( layerNumber ); -} - -const Set &Layer::getSuccessorLayers() const -{ - return _successorLayers; -} - -const double *Layer::getWeightMatrix( unsigned sourceLayer ) const -{ - ASSERT( _layerToWeights.exists( sourceLayer ) ); - return _layerToWeights[sourceLayer]; -} - -void Layer::removeSourceLayer( unsigned sourceLayer ) -{ - ASSERT( _sourceLayers.exists( sourceLayer ) ); - - delete[] _layerToWeights[sourceLayer]; - delete[] _layerToPositiveWeights[sourceLayer]; - delete[] _layerToNegativeWeights[sourceLayer]; - - _sourceLayers.erase( sourceLayer ); - _layerToWeights.erase( sourceLayer ); - _layerToPositiveWeights.erase( sourceLayer ); - _layerToNegativeWeights.erase( sourceLayer ); -} - -void Layer::setWeight( unsigned sourceLayer, - unsigned sourceNeuron, - unsigned targetNeuron, - double weight ) -{ - unsigned index = sourceNeuron * _size + targetNeuron; - _layerToWeights[sourceLayer][index] = weight; - - if ( weight > 0 ) - { - _layerToPositiveWeights[sourceLayer][index] = weight; - _layerToNegativeWeights[sourceLayer][index] = 0; - } - else - { - _layerToPositiveWeights[sourceLayer][index] = 0; - _layerToNegativeWeights[sourceLayer][index] = weight; - } -} - -double Layer::getWeight( unsigned sourceLayer, unsigned sourceNeuron, unsigned targetNeuron ) const -{ - unsigned index = sourceNeuron * _size + targetNeuron; - return _layerToWeights[sourceLayer][index]; -} - -double *Layer::getWeights( unsigned sourceLayerIndex ) const -{ - return _layerToWeights[sourceLayerIndex]; -} - -double *Layer::getPositiveWeights( unsigned sourceLayerIndex ) const -{ - return _layerToPositiveWeights[sourceLayerIndex]; -} - -double *Layer::getNegativeWeights( unsigned sourceLayerIndex ) const -{ - return _layerToNegativeWeights[sourceLayerIndex]; -} - -void Layer::setBias( unsigned neuron, double bias ) -{ - _bias[neuron] = bias; -} - -double Layer::getBias( unsigned neuron ) const -{ - return _bias[neuron]; -} - -double *Layer::getBiases() const -{ - return _bias; -} - -void Layer::addActivationSource( unsigned sourceLayer, - unsigned sourceNeuron, - unsigned targetNeuron ) -{ - ASSERT( _type == RELU || _type == LEAKY_RELU || _type == ABSOLUTE_VALUE || _type == MAX || - _type == ROUND || _type == SIGN || _type == SIGMOID || _type == SOFTMAX || - _type == BILINEAR ); - - if ( !_neuronToActivationSources.exists( targetNeuron ) ) - _neuronToActivationSources[targetNeuron] = List(); - - _neuronToActivationSources[targetNeuron].append( NeuronIndex( sourceLayer, sourceNeuron ) ); - - DEBUG( { - if ( _type == RELU 
|| _type == LEAKY_RELU || _type == ABSOLUTE_VALUE || _type == SIGN || - _type == ROUND ) - ASSERT( _neuronToActivationSources[targetNeuron].size() == 1 ); - } ); -} - -List Layer::getActivationSources( unsigned neuron ) const -{ - return _neuronToActivationSources[neuron]; -} - -unsigned Layer::getSize() const -{ - return _size; -} - -Layer::Type Layer::getLayerType() const -{ - return _type; -} - -void Layer::setNeuronVariable( unsigned neuron, unsigned variable ) -{ - ASSERT( !_eliminatedNeurons.exists( neuron ) ); - - _neuronToVariable[neuron] = variable; - _variableToNeuron[variable] = neuron; -} - -void Layer::obtainCurrentBounds( const Query &inputQuery ) -{ - for ( unsigned i = 0; i < _size; ++i ) - { - if ( _neuronToVariable.exists( i ) ) - { - unsigned variable = _neuronToVariable[i]; - _lb[i] = inputQuery.getLowerBound( variable ); - _ub[i] = inputQuery.getUpperBound( variable ); - } - else - { - ASSERT( _eliminatedNeurons.exists( i ) ); - _lb[i] = _eliminatedNeurons[i]; - _ub[i] = _eliminatedNeurons[i]; - } - } -} - -void Layer::obtainCurrentBounds() -{ - for ( unsigned i = 0; i < _size; ++i ) - { - if ( _neuronToVariable.exists( i ) ) - { - unsigned variable = _neuronToVariable[i]; - _lb[i] = _layerOwner->getTableau()->getLowerBound( variable ); - _ub[i] = _layerOwner->getTableau()->getUpperBound( variable ); - } - else - { - ASSERT( _eliminatedNeurons.exists( i ) ); - _lb[i] = _eliminatedNeurons[i]; - _ub[i] = _eliminatedNeurons[i]; - } - } -} - -double Layer::getLb( unsigned neuron ) const -{ - if ( _eliminatedNeurons.exists( neuron ) ) - return _eliminatedNeurons[neuron]; - - return _lb[neuron]; -} - -double Layer::getUb( unsigned neuron ) const -{ - if ( _eliminatedNeurons.exists( neuron ) ) - return _eliminatedNeurons[neuron]; - - return _ub[neuron]; -} - -double *Layer::getLbs() const -{ - return _lb; -} - -double *Layer::getUbs() const -{ - return _ub; -} - -void Layer::setLb( unsigned neuron, double bound ) -{ - ASSERT( !_eliminatedNeurons.exists( neuron ) ); - _lb[neuron] = bound; -} - -void Layer::setUb( unsigned neuron, double bound ) -{ - ASSERT( !_eliminatedNeurons.exists( neuron ) ); - _ub[neuron] = bound; -} - -void Layer::computeIntervalArithmeticBounds() -{ - ASSERT( _type != INPUT ); - - switch ( _type ) - { - case WEIGHTED_SUM: - computeIntervalArithmeticBoundsForWeightedSum(); - break; - - case RELU: - computeIntervalArithmeticBoundsForRelu(); - break; - - case ABSOLUTE_VALUE: - computeIntervalArithmeticBoundsForAbs(); - break; - - case SIGN: - computeIntervalArithmeticBoundsForSign(); - break; - - case ROUND: - computeIntervalArithmeticBoundsForRound(); - break; - - case LEAKY_RELU: - computeIntervalArithmeticBoundsForLeakyRelu(); - break; - - case SIGMOID: - computeIntervalArithmeticBoundsForSigmoid(); - break; - - case MAX: - computeIntervalArithmeticBoundsForMax(); - break; - - case SOFTMAX: - computeIntervalArithmeticBoundsForSoftmax(); - break; - - case BILINEAR: - computeIntervalArithmeticBoundsForBilinear(); - break; - - default: - printf( "Error! 
Activation type %u unsupported\n", _type ); - throw MarabouError( MarabouError::NETWORK_LEVEL_REASONER_ACTIVATION_NOT_SUPPORTED ); - break; - } -} - -void Layer::computeIntervalArithmeticBoundsForWeightedSum() -{ - double *newLb = new double[_size]; - double *newUb = new double[_size]; - - for ( unsigned i = 0; i < _size; ++i ) - { - newLb[i] = _bias[i]; - newUb[i] = _bias[i]; - } - - for ( const auto &sourceLayerEntry : _sourceLayers ) - { - unsigned sourceLayerIndex = sourceLayerEntry.first; - unsigned sourceLayerSize = sourceLayerEntry.second; - const Layer *sourceLayer = _layerOwner->getLayer( sourceLayerIndex ); - const double *weights = _layerToWeights[sourceLayerIndex]; - - for ( unsigned i = 0; i < _size; ++i ) - { - for ( unsigned j = 0; j < sourceLayerSize; ++j ) - { - double previousLb = sourceLayer->getLb( j ); - double previousUb = sourceLayer->getUb( j ); - double weight = weights[j * _size + i]; - - if ( weight > 0 ) - { - newLb[i] += weight * previousLb; - newUb[i] += weight * previousUb; - } - else - { - newLb[i] += weight * previousUb; - newUb[i] += weight * previousLb; - } - } - } - } - - for ( unsigned i = 0; i < _size; ++i ) - { - if ( _eliminatedNeurons.exists( i ) ) - continue; - - if ( _lb[i] < newLb[i] ) - { - _lb[i] = newLb[i]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } - if ( _ub[i] > newUb[i] ) - { - _ub[i] = newUb[i]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); - } - } - - delete[] newLb; - delete[] newUb; -} - -void Layer::computeIntervalArithmeticBoundsForRelu() -{ - for ( unsigned i = 0; i < _size; ++i ) - { - if ( _eliminatedNeurons.exists( i ) ) - continue; - - NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); - const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); - - double lb = sourceLayer->getLb( sourceIndex._neuron ); - double ub = sourceLayer->getUb( sourceIndex._neuron ); - - if ( lb < 0 ) - lb = 0; - - if ( _lb[i] < lb ) - { - _lb[i] = lb; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } - if ( _ub[i] > ub ) - { - _ub[i] = ub; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); - } - } -} - -void Layer::computeIntervalArithmeticBoundsForAbs() -{ - for ( unsigned i = 0; i < _size; ++i ) - { - if ( _eliminatedNeurons.exists( i ) ) - continue; - - NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); - const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); - - double lb = sourceLayer->getLb( sourceIndex._neuron ); - double ub = sourceLayer->getUb( sourceIndex._neuron ); - - if ( lb > 0 ) - { - if ( _lb[i] < lb ) - { - _lb[i] = lb; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } - if ( _ub[i] > ub ) - { - _ub[i] = ub; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); - } - } - else if ( ub < 0 ) - { - if ( _lb[i] < -ub ) - { - _lb[i] = -ub; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } - if ( _ub[i] > -lb ) - { - _ub[i] = -lb; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); - } - } - else - { - // lb < 0 < ub - if ( _lb[i] < 0 ) - { - _lb[i] = 0; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } - - if ( 
_ub[i] > FloatUtils::max( ub, -lb ) ) - { - _ub[i] = FloatUtils::max( ub, -lb ); - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); - } - } - } -} - -void Layer::computeIntervalArithmeticBoundsForSign() -{ - for ( unsigned i = 0; i < _size; ++i ) - { - if ( _eliminatedNeurons.exists( i ) ) - continue; - - NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); - const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); - - double lb = sourceLayer->getLb( sourceIndex._neuron ); - double ub = sourceLayer->getUb( sourceIndex._neuron ); - - double newLb; - double newUb; - - if ( !FloatUtils::isNegative( lb ) ) - { - newLb = 1; - newUb = 1; - } - else if ( FloatUtils::isNegative( ub ) ) - { - newLb = -1; - newUb = -1; - } - else - { - newLb = -1; - newUb = 1; - } - - /* - We now have the tightest bounds we can for the Sign - variable. If they are tighter than what was previously - known, store them. - */ - if ( _lb[i] < newLb ) - { - _lb[i] = newLb; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } - - if ( _ub[i] > newUb ) - { - _ub[i] = newUb; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); - } - } -} - -void Layer::computeIntervalArithmeticBoundsForLeakyRelu() -{ - for ( unsigned i = 0; i < _size; ++i ) - { - if ( _eliminatedNeurons.exists( i ) ) - continue; - - NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); - const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); - - double lb = sourceLayer->getLb( sourceIndex._neuron ); - double ub = sourceLayer->getUb( sourceIndex._neuron ); - - if ( lb > 0 ) - { - if ( _lb[i] < lb ) - { - _lb[i] = lb; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } - if ( _ub[i] > ub ) - { - _ub[i] = ub; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); - } - } - else if ( ub < 0 ) - { - if ( _lb[i] < _alpha * lb ) - { - _lb[i] = _alpha * lb; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } - if ( _ub[i] > _alpha * ub ) - { - _ub[i] = _alpha * ub; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); - } - } - else - { - // lb < 0 < ub - if ( _lb[i] < _alpha * lb ) - { - _lb[i] = _alpha * lb; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } - - if ( _ub[i] > ub ) - { - _ub[i] = ub; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); - } - } - } -} - -void Layer::computeIntervalArithmeticBoundsForSigmoid() -{ - for ( unsigned i = 0; i < _size; ++i ) - { - if ( _eliminatedNeurons.exists( i ) ) - continue; - - NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); - const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); - - double lb = sourceLayer->getLb( sourceIndex._neuron ); - double ub = sourceLayer->getUb( sourceIndex._neuron ); - - double lbSigmoid = SigmoidConstraint::sigmoid( lb ); - double ubSigmoid = SigmoidConstraint::sigmoid( ub ); - - - if ( _lb[i] < lbSigmoid ) - { - _lb[i] = lbSigmoid; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } - if ( _ub[i] > ubSigmoid ) - { - _ub[i] = ubSigmoid; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i],
_ub[i], Tightening::UB ) ); - } - } -} - -void Layer::computeIntervalArithmeticBoundsForRound() -{ - for ( unsigned i = 0; i < _size; ++i ) - { - if ( _eliminatedNeurons.exists( i ) ) - continue; - - NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); - const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); - - double lb = sourceLayer->getLb( sourceIndex._neuron ); - double ub = sourceLayer->getUb( sourceIndex._neuron ); - - double lbRound = FloatUtils::round( lb ); - double ubRound = FloatUtils::round( ub ); - - - if ( _lb[i] < lbRound ) - { - _lb[i] = lbRound; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } - if ( _ub[i] > ubRound ) - { - _ub[i] = ubRound; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); - } - } -} - -void Layer::computeIntervalArithmeticBoundsForMax() -{ - for ( unsigned i = 0; i < _size; ++i ) - { - if ( _eliminatedNeurons.exists( i ) ) - continue; - - ASSERT( _neuronToActivationSources.exists( i ) ); - List sources = getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - - NeuronIndex indexOfMaxLowerBound = *( sources.begin() ); - double maxLowerBound = FloatUtils::negativeInfinity(); - double maxUpperBound = FloatUtils::negativeInfinity(); - - Map sourceLbs; - Map sourceUbs; - unsigned counter = 0; - for ( const auto &sourceIndex : sources ) - { - unsigned sourceNeuron = sourceIndex._neuron; - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - - sourceLbs[sourceIndex] = sourceLb; - sourceUbs[sourceIndex] = sourceUb; - - if ( maxLowerBound < sourceLb ) - { - indexOfMaxLowerBound = sourceIndex; - maxLowerBound = sourceLb; - } - if ( maxUpperBound < sourceUb ) - { - maxUpperBound = sourceUb; - } - ++counter; - } - - // The phase is fixed if the lower-bound of a source variable x_b is - // larger than the upper-bounds of the other source variables. 
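A quick worked example of this criterion: for sources with bounds [3,5], [0,2] and [1,4], the largest lower bound is 3 (first source), but the third source's upper bound 4 exceeds 3, so the phase is not fixed; shrink the third source to [1,3] and the first source wins outright, giving x_f in [3,5]. A standalone sketch of the same check, mirroring the loop that follows (illustrative names, strict comparison as with FloatUtils::gt):

    #include <vector>

    struct Bounds
    {
        double lb, ub;
    };

    // Phase is fixed iff one source's lb is at least every other source's ub.
    bool maxPhaseFixed( const std::vector<Bounds> &sources )
    {
        size_t best = 0;
        for ( size_t i = 1; i < sources.size(); ++i )
            if ( sources[i].lb > sources[best].lb )
                best = i; // source with the maximal lower bound
        for ( size_t i = 0; i < sources.size(); ++i )
            if ( i != best && sources[i].ub > sources[best].lb )
                return false; // another source can still overtake the leader
        return true;
    }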
-        bool phaseFixed = true;
-        for ( const auto &sourceIndex : sources )
-        {
-            if ( sourceIndex != indexOfMaxLowerBound &&
-                 FloatUtils::gt( sourceUbs[sourceIndex], maxLowerBound ) )
-            {
-                phaseFixed = false;
-                break;
-            }
-        }
-
-        if ( phaseFixed )
-        {
-            // Phase fixed
-            // Concrete bound: lb_b <= x_f <= ub_b
-            if ( _lb[i] < maxLowerBound )
-            {
-                _lb[i] = maxLowerBound;
-                _layerOwner->receiveTighterBound(
-                    Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
-            }
-
-            if ( _ub[i] > sourceUbs[indexOfMaxLowerBound] )
-            {
-                _ub[i] = sourceUbs[indexOfMaxLowerBound];
-                _layerOwner->receiveTighterBound(
-                    Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
-            }
-        }
-        else
-        {
-            // MaxPool not fixed
-            // Concrete bounds: lb_b <= x_f <= maxUpperBound
-            if ( _lb[i] < maxLowerBound )
-            {
-                _lb[i] = maxLowerBound;
-                _layerOwner->receiveTighterBound(
-                    Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
-            }
-
-            if ( _ub[i] > maxUpperBound )
-            {
-                _ub[i] = maxUpperBound;
-                _layerOwner->receiveTighterBound(
-                    Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
-            }
-        }
-    }
-}
-
-void Layer::computeIntervalArithmeticBoundsForSoftmax()
-{
-    Set<unsigned> handledInputNeurons;
-    for ( unsigned i = 0; i < _size; ++i )
-    {
-        if ( _eliminatedNeurons.exists( i ) )
-            continue;
-
-        ASSERT( _neuronToActivationSources.exists( i ) );
-        List<NeuronIndex> sources = getActivationSources( i );
-        const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer );
-
-        ASSERT( sourceLayer->getSize() == _size );
-
-        Vector<double> sourceLbs;
-        Vector<double> sourceUbs;
-        for ( const auto &sourceIndex : sources )
-        {
-            unsigned sourceNeuron = sourceIndex._neuron;
-            double sourceLb = sourceLayer->getLb( sourceNeuron );
-            double sourceUb = sourceLayer->getUb( sourceNeuron );
-
-            sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS );
-            sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS );
-        }
-
-        // Find the index of i in the softmax
-        unsigned index = 0;
-        for ( const auto &sourceIndex : sources )
-        {
-            if ( handledInputNeurons.exists( sourceIndex._neuron ) )
-                ++index;
-            else
-            {
-                handledInputNeurons.insert( sourceIndex._neuron );
-                break;
-            }
-        }
-
-        double lb = linearLowerBound( sourceLbs, sourceUbs, index );
-        double ub = linearUpperBound( sourceLbs, sourceUbs, index );
-        if ( _lb[i] < lb )
-        {
-            _lb[i] = lb;
-            _layerOwner->receiveTighterBound(
-                Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
-        }
-        if ( _ub[i] > ub )
-        {
-            _ub[i] = ub;
-            _layerOwner->receiveTighterBound(
-                Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
-        }
-    }
-}
-
-void Layer::computeIntervalArithmeticBoundsForBilinear()
-{
-    for ( unsigned i = 0; i < _size; ++i )
-    {
-        if ( _eliminatedNeurons.exists( i ) )
-            continue;
-
-        ASSERT( _neuronToActivationSources.exists( i ) );
-        List<NeuronIndex> sources = getActivationSources( i );
-        ASSERT( sources.size() == 2 );
-
-        const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer );
-
-        Vector<double> sourceLbs;
-        Vector<double> sourceUbs;
-        Vector<double> sourceValues;
-        bool allConstant = true;
-        for ( const auto &sourceIndex : sources )
-        {
-            unsigned sourceNeuron = sourceIndex._neuron;
-            double sourceLb = sourceLayer->getLb( sourceNeuron );
-            double sourceUb = sourceLayer->getUb( sourceNeuron );
-
-            sourceLbs.append( sourceLb );
-            sourceUbs.append( sourceUb );
-
-            if ( !sourceLayer->neuronEliminated( sourceNeuron ) )
-            {
-                allConstant = false;
-            }
-            else
-            {
-                double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron );
-                sourceValues.append( sourceValue );
-            }
-        }
-
-        if ( allConstant )
-        {
-            // If both source neurons have been eliminated, this neuron is constant
-            double value = sourceValues[0] * sourceValues[1];
-
-            if ( _lb[i] < value )
-            {
-                _lb[i] = value;
-                _layerOwner->receiveTighterBound(
-                    Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
-            }
-
-            if ( _ub[i] > value )
-            {
-                _ub[i] = value;
-                _layerOwner->receiveTighterBound(
-                    Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
-            }
-            continue;
-        }
-
-        double lb = FloatUtils::infinity();
-        double ub = FloatUtils::negativeInfinity();
-        List<double> values = { sourceLbs[0] * sourceLbs[1],
-                                sourceLbs[0] * sourceUbs[1],
-                                sourceUbs[0] * sourceLbs[1],
-                                sourceUbs[0] * sourceUbs[1] };
-        for ( const auto &v : values )
-        {
-            if ( v < lb )
-                lb = v;
-            if ( v > ub )
-                ub = v;
-        }
-
-        if ( _lb[i] < lb )
-        {
-            _lb[i] = lb;
-            _layerOwner->receiveTighterBound(
-                Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
-        }
-
-        if ( _ub[i] > ub )
-        {
-            _ub[i] = ub;
-            _layerOwner->receiveTighterBound(
-                Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
-        }
-    }
-}
-
-void Layer::computeSymbolicBounds()
-{
-    switch ( _type )
-    {
-    case INPUT:
-        computeSymbolicBoundsForInput();
-        break;
-
-    case WEIGHTED_SUM:
-        computeSymbolicBoundsForWeightedSum();
-        break;
-
-    case RELU:
-        computeSymbolicBoundsForRelu();
-        break;
-
-    case SIGN:
-        computeSymbolicBoundsForSign();
-        break;
-
-    case ABSOLUTE_VALUE:
-        computeSymbolicBoundsForAbsoluteValue();
-        break;
-
-    case LEAKY_RELU:
-        computeSymbolicBoundsForLeakyRelu();
-        break;
-
-    case ROUND:
-        computeSymbolicBoundsForRound();
-        break;
-
-    case SIGMOID:
-        computeSymbolicBoundsForSigmoid();
-        break;
-
-    case MAX:
-        computeSymbolicBoundsForMax();
-        break;
-
-    case SOFTMAX:
-        computeSymbolicBoundsForSoftmax();
-        break;
-
-    case BILINEAR:
-        computeSymbolicBoundsForBilinear();
-        break;
-
-    default:
-        computeSymbolicBoundsDefault();
-        break;
-    }
-}
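
A standalone check of the corner-product rule used in computeIntervalArithmeticBoundsForBilinear above (editorial sketch with hypothetical intervals): the exact range of a product of intervals is spanned by the four corner products, which is precisely what the values loop enumerates.

    #include <algorithm>
    #include <cstdio>

    // Interval product [lx, ux] * [ly, uy]: take the min and max of the
    // four corner products, exactly as the loop above does.
    int main()
    {
        double lx = -1, ux = 2, ly = -3, uy = 1;
        double corners[] = { lx * ly, lx * uy, ux * ly, ux * uy }; // 3, -1, -6, 2
        printf( "[%g, %g]\n",
                *std::min_element( corners, corners + 4 ),
                *std::max_element( corners, corners + 4 ) ); // [-6, 3]
        return 0;
    }
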
-
-void Layer::computeSymbolicBoundsDefault()
-{
-    // This is the default operation, for layers that are not
-    // supported yet. The "symbolic" bounds computed are just the
-    // concrete bounds.
-
-    std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 );
-    std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 );
-
-    for ( unsigned i = 0; i < _size; ++i )
-    {
-        double lb;
-        double ub;
-
-        if ( _eliminatedNeurons.exists( i ) )
-        {
-            lb = _eliminatedNeurons[i];
-            ub = _eliminatedNeurons[i];
-        }
-        else
-        {
-            lb = _lb[i];
-            ub = _ub[i];
-        }
-
-        _symbolicLowerBias[i] = lb;
-        _symbolicUpperBias[i] = ub;
-
-        _symbolicLbOfLb[i] = lb;
-        _symbolicUbOfLb[i] = ub;
-        _symbolicLbOfUb[i] = lb;
-        _symbolicUbOfUb[i] = ub;
-    }
-}
-
-void Layer::computeSymbolicBoundsForInput()
-{
-    std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 );
-    std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 );
-
-    // For the input layer, the bounds are just the identity polynomials
-    for ( unsigned i = 0; i < _size; ++i )
-    {
-        _symbolicLb[_size * i + i] = 1;
-        _symbolicUb[_size * i + i] = 1;
-
-        _symbolicLowerBias[i] = 0;
-        _symbolicUpperBias[i] = 0;
-
-        double lb = _lb[i];
-        double ub = _ub[i];
-
-        if ( _eliminatedNeurons.exists( i ) )
-        {
-            lb = _eliminatedNeurons[i];
-            ub = _eliminatedNeurons[i];
-        }
-
-        _symbolicLbOfLb[i] = lb;
-        _symbolicUbOfLb[i] = ub;
-        _symbolicLbOfUb[i] = lb;
-        _symbolicUbOfUb[i] = ub;
-    }
-}
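
For reference, a symbolic bound here is a row of input-space coefficients plus a bias, stored row-major as _symbolicLb[j * _size + i] (coefficient of input j for neuron i). Concretizing such a row over the input box follows one fixed pattern that recurs in every function below; a self-contained sketch with hypothetical coefficients (not taken from the patch):

    #include <cstdio>

    // Evaluating a symbolic lower bound sum_j c_j * x_j + bias over the box
    // [inputLb_j, inputUb_j]: a positive coefficient takes the matching
    // endpoint, a negative one the opposite endpoint -- the same pattern
    // the concretization loops below use for _symbolicLbOfLb and friends.
    int main()
    {
        const unsigned n = 2;
        double coeff[n] = { 1.0, -2.0 }; // hypothetical row of _symbolicLb
        double bias = 0.5;
        double inputLb[n] = { -1.0, 0.0 };
        double inputUb[n] = { 1.0, 3.0 };

        double lbOfLb = bias, ubOfLb = bias;
        for ( unsigned j = 0; j < n; ++j )
        {
            if ( coeff[j] >= 0 )
            {
                lbOfLb += coeff[j] * inputLb[j];
                ubOfLb += coeff[j] * inputUb[j];
            }
            else
            {
                lbOfLb += coeff[j] * inputUb[j];
                ubOfLb += coeff[j] * inputLb[j];
            }
        }
        printf( "lbOfLb = %g, ubOfLb = %g\n", lbOfLb, ubOfLb ); // -6.5, 1.5
        return 0;
    }
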
-
-void Layer::computeSymbolicBoundsForRelu()
-{
-    std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 );
-    std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 );
-
-    for ( unsigned i = 0; i < _size; ++i )
-    {
-        if ( _eliminatedNeurons.exists( i ) )
-        {
-            _symbolicLowerBias[i] = _eliminatedNeurons[i];
-            _symbolicUpperBias[i] = _eliminatedNeurons[i];
-
-            _symbolicLbOfLb[i] = _eliminatedNeurons[i];
-            _symbolicUbOfLb[i] = _eliminatedNeurons[i];
-            _symbolicLbOfUb[i] = _eliminatedNeurons[i];
-            _symbolicUbOfUb[i] = _eliminatedNeurons[i];
-        }
-    }
-
-    for ( unsigned i = 0; i < _size; ++i )
-    {
-        if ( _eliminatedNeurons.exists( i ) )
-            continue;
-
-        /*
-          There are two ways we can determine that a ReLU has become fixed:
-
-          1. If the ReLU's variable has been externally fixed
-          2. lbLb >= 0 (ACTIVE) or ubUb <= 0 (INACTIVE)
-        */
-        PhaseStatus reluPhase = PHASE_NOT_FIXED;
-
-        // Has the f variable been eliminated or fixed?
-        if ( FloatUtils::isPositive( _lb[i] ) )
-            reluPhase = RELU_PHASE_ACTIVE;
-        else if ( FloatUtils::isZero( _ub[i] ) )
-            reluPhase = RELU_PHASE_INACTIVE;
-
-        ASSERT( _neuronToActivationSources.exists( i ) );
-        NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin();
-        const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
-
-        /*
-          A ReLU initially "inherits" the symbolic bounds computed
-          for its input variable
-        */
-        unsigned sourceLayerSize = sourceLayer->getSize();
-        const double *sourceSymbolicLb = sourceLayer->getSymbolicLb();
-        const double *sourceSymbolicUb = sourceLayer->getSymbolicUb();
-
-        for ( unsigned j = 0; j < _inputLayerSize; ++j )
-        {
-            _symbolicLb[j * _size + i] =
-                sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron];
-            _symbolicUb[j * _size + i] =
-                sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron];
-        }
-        _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron];
-        _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron];
-
-        double sourceLb = sourceLayer->getLb( sourceIndex._neuron );
-        double sourceUb = sourceLayer->getUb( sourceIndex._neuron );
-
-        _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron );
-        _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron );
-        _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron );
-        _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron );
-
-        // Has the b variable been fixed?
-        if ( !FloatUtils::isNegative( sourceLb ) )
-        {
-            reluPhase = RELU_PHASE_ACTIVE;
-        }
-        else if ( !FloatUtils::isPositive( sourceUb ) )
-        {
-            reluPhase = RELU_PHASE_INACTIVE;
-        }
-
-        if ( reluPhase == PHASE_NOT_FIXED )
-        {
-            // If we got here, we know that lbLb < 0 and ubUb > 0.
-            // There are four possible cases, depending on whether
-            // ubLb and lbUb are negative or positive
-            // (see the Neurify paper, page 14).
-
-            // Upper bound
-            if ( _symbolicLbOfUb[i] <= 0 )
-            {
-                // lbOfUb[i] < 0 < ubOfUb[i]
-                // Concretize the upper bound using the Ehlers-style approximation
-                for ( unsigned j = 0; j < _inputLayerSize; ++j )
-                    _symbolicUb[j * _size + i] = _symbolicUb[j * _size + i] * _symbolicUbOfUb[i] /
-                                                 ( _symbolicUbOfUb[i] - _symbolicLbOfUb[i] );
-
-                // Do the same for the bias, and then adjust
-                _symbolicUpperBias[i] = _symbolicUpperBias[i] * _symbolicUbOfUb[i] /
-                                        ( _symbolicUbOfUb[i] - _symbolicLbOfUb[i] );
-                _symbolicUpperBias[i] -= _symbolicLbOfUb[i] * _symbolicUbOfUb[i] /
-                                         ( _symbolicUbOfUb[i] - _symbolicLbOfUb[i] );
-            }
-
-            // Lower bound
-            if ( _symbolicUbOfLb[i] <= 0 )
-            {
-                for ( unsigned j = 0; j < _inputLayerSize; ++j )
-                    _symbolicLb[j * _size + i] = 0;
-
-                _symbolicLowerBias[i] = 0;
-            }
-            else
-            {
-                for ( unsigned j = 0; j < _inputLayerSize; ++j )
-                    _symbolicLb[j * _size + i] = _symbolicLb[j * _size + i] * _symbolicUbOfLb[i] /
-                                                 ( _symbolicUbOfLb[i] - _symbolicLbOfLb[i] );
-
-                _symbolicLowerBias[i] = _symbolicLowerBias[i] * _symbolicUbOfLb[i] /
-                                        ( _symbolicUbOfLb[i] - _symbolicLbOfLb[i] );
-            }
-
-            _symbolicLbOfLb[i] = 0;
-        }
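
The scaling just applied is the familiar secant ("triangle") relaxation: for l < 0 < u, relu(x) <= u / ( u - l ) * ( x - l ). A quick endpoint check of that formula (editorial sketch, hypothetical numbers):

    #include <cassert>
    #include <cmath>

    // Verify the triangle upper bound meets relu at both interval ends.
    int main()
    {
        double l = -2, u = 4;
        double slope = u / ( u - l );     // 2/3, the factor applied to _symbolicUb
        double bias = -l * u / ( u - l ); // 4/3, the adjustment added to the bias
        assert( std::fabs( slope * l + bias - 0 ) < 1e-9 ); // at x = l: relu(l) = 0
        assert( std::fabs( slope * u + bias - u ) < 1e-9 ); // at x = u: relu(u) = u
        return 0;
    }
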
-        else
-        {
-            // The phase of this ReLU is fixed!
-            if ( reluPhase == RELU_PHASE_ACTIVE )
-            {
-                // Active ReLU, bounds are propagated as is
-            }
-            else
-            {
-                // Inactive ReLU, returns zero
-                _symbolicLbOfLb[i] = 0;
-                _symbolicUbOfLb[i] = 0;
-                _symbolicLbOfUb[i] = 0;
-                _symbolicUbOfUb[i] = 0;
-
-                for ( unsigned j = 0; j < _inputLayerSize; ++j )
-                {
-                    _symbolicUb[j * _size + i] = 0;
-                    _symbolicLb[j * _size + i] = 0;
-                }
-
-                _symbolicLowerBias[i] = 0;
-                _symbolicUpperBias[i] = 0;
-            }
-        }
-
-        if ( _symbolicLbOfUb[i] < 0 )
-            _symbolicLbOfUb[i] = 0;
-
-        /*
-          We now have the tightest bounds we can for the ReLU
-          variable. If they are tighter than what was previously
-          known, store them.
-        */
-        if ( _lb[i] < _symbolicLbOfLb[i] )
-        {
-            _lb[i] = _symbolicLbOfLb[i];
-            _layerOwner->receiveTighterBound(
-                Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
-        }
-
-        if ( _ub[i] > _symbolicUbOfUb[i] )
-        {
-            _ub[i] = _symbolicUbOfUb[i];
-            _layerOwner->receiveTighterBound(
-                Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
-        }
-    }
-}
-
-void Layer::computeSymbolicBoundsForSign()
-{
-    std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 );
-    std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 );
-
-    for ( unsigned i = 0; i < _size; ++i )
-    {
-        // Eliminated neurons are skipped
-        if ( _eliminatedNeurons.exists( i ) )
-        {
-            _symbolicLowerBias[i] = _eliminatedNeurons[i];
-            _symbolicUpperBias[i] = _eliminatedNeurons[i];
-
-            _symbolicLbOfLb[i] = _eliminatedNeurons[i];
-            _symbolicUbOfLb[i] = _eliminatedNeurons[i];
-            _symbolicLbOfUb[i] = _eliminatedNeurons[i];
-            _symbolicUbOfUb[i] = _eliminatedNeurons[i];
-
-            continue;
-        }
-
-        /*
-          There are two ways we can determine that a Sign has become fixed:
-
-          1. If the Sign's variable has been externally fixed
-          2. lbLb >= 0 (Positive) or ubUb < 0 (Negative)
-        */
-        PhaseStatus signPhase = PHASE_NOT_FIXED;
-
-        // Has the f variable been eliminated or fixed?
-        if ( !FloatUtils::isNegative( _lb[i] ) )
-            signPhase = SIGN_PHASE_POSITIVE;
-        else if ( FloatUtils::isNegative( _ub[i] ) )
-            signPhase = SIGN_PHASE_NEGATIVE;
-
-        ASSERT( _neuronToActivationSources.exists( i ) );
-        NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin();
-        const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
-
-        /*
-          A Sign initially "inherits" the symbolic bounds computed
-          for its input variable
-        */
-        unsigned sourceLayerSize = sourceLayer->getSize();
-        const double *sourceSymbolicLb = sourceLayer->getSymbolicLb();
-        const double *sourceSymbolicUb = sourceLayer->getSymbolicUb();
-
-        for ( unsigned j = 0; j < _inputLayerSize; ++j )
-        {
-            _symbolicLb[j * _size + i] =
-                sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron];
-            _symbolicUb[j * _size + i] =
-                sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron];
-        }
-        _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron];
-        _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron];
-
-        double sourceLb = sourceLayer->getLb( sourceIndex._neuron );
-        double sourceUb = sourceLayer->getUb( sourceIndex._neuron );
-
-        _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron );
-        _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron );
-        _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron );
-        _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron );
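
The unfixed-sign case below scales the inherited bound by -2 / lbOfLb (upper) or 2 / ubOfUb (lower), the "parallelogram" approximation. In the special case where the inherited bound is simply x, the resulting upper line joins ( l, -1 ) to ( 0, 1 ), staying above sign(x) on [l, u]. A small check of that special case (editorial sketch, hypothetical l):

    #include <cassert>
    #include <cmath>

    int main()
    {
        double l = -4;            // hypothetical _symbolicLbOfLb
        double factor = -2.0 / l; // 0.5
        auto upper = [&]( double x ) { return factor * x + 1; };
        assert( std::fabs( upper( l ) - ( -1 ) ) < 1e-9 ); // tight at x = l
        assert( std::fabs( upper( 0 ) - 1 ) < 1e-9 );      // reaches +1 at x = 0
        return 0;
    }
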
-
-        // Has the b variable been fixed?
-        if ( !FloatUtils::isNegative( sourceLb ) )
-        {
-            signPhase = SIGN_PHASE_POSITIVE;
-        }
-        else if ( FloatUtils::isNegative( sourceUb ) )
-        {
-            signPhase = SIGN_PHASE_NEGATIVE;
-        }
-
-        if ( signPhase == PHASE_NOT_FIXED )
-        {
-            PhaseStatus upperSignPhase = PHASE_NOT_FIXED;
-            PhaseStatus lowerSignPhase = PHASE_NOT_FIXED;
-
-            // If we got here, we know that lbLb < 0 and ubUb > 0
-
-            // Upper bound
-            if ( !FloatUtils::isNegative( _symbolicLbOfUb[i] ) )
-            {
-                // The upper bound is strictly positive - turns into
-                // the constant 1
-                for ( unsigned j = 0; j < _inputLayerSize; ++j )
-                    _symbolicUb[j * _size + i] = 0;
-
-                _symbolicUpperBias[i] = 1;
-
-                upperSignPhase = SIGN_PHASE_POSITIVE;
-            }
-            else
-            {
-                // The upper bound's phase is not fixed, use the
-                // parallelogram approximation
-                double factor = -2.0 / _symbolicLbOfLb[i];
-
-                for ( unsigned j = 0; j < _inputLayerSize; ++j )
-                    _symbolicUb[j * _size + i] *= factor;
-
-                // Do the same for the bias, and then adjust
-                _symbolicUpperBias[i] *= factor;
-                _symbolicUpperBias[i] += 1;
-            }
-
-            // Lower bound
-            if ( FloatUtils::isNegative( _symbolicUbOfLb[i] ) )
-            {
-                // The lower bound is strictly negative - turns into
-                // the constant -1
-                for ( unsigned j = 0; j < _inputLayerSize; ++j )
-                    _symbolicLb[j * _size + i] = 0;
-
-                _symbolicLowerBias[i] = -1;
-
-                lowerSignPhase = SIGN_PHASE_NEGATIVE;
-            }
-            else
-            {
-                // The lower bound's phase is not fixed, use the
-                // parallelogram approximation
-                double factor = 2.0 / _symbolicUbOfUb[i];
-
-                for ( unsigned j = 0; j < _inputLayerSize; ++j )
-                {
-                    _symbolicLb[j * _size + i] *= factor;
-                }
-
-                // Do the same for the bias, and then adjust
-                _symbolicLowerBias[i] *= factor;
-                _symbolicLowerBias[i] -= 1;
-            }
-
-            if ( upperSignPhase == PHASE_NOT_FIXED )
-            {
-                _symbolicUbOfUb[i] = 1;
-                _symbolicLbOfUb[i] = -1;
-            }
-            else
-            {
-                _symbolicUbOfUb[i] = 1;
-                _symbolicLbOfUb[i] = 1;
-            }
-
-            if ( lowerSignPhase == PHASE_NOT_FIXED )
-            {
-                _symbolicUbOfLb[i] = 1;
-                _symbolicLbOfLb[i] = -1;
-            }
-            else
-            {
-                _symbolicUbOfLb[i] = -1;
-                _symbolicLbOfLb[i] = -1;
-            }
-        }
-        else
-        {
-            // The phase of this Sign is fixed!
-            double constant = ( signPhase == SIGN_PHASE_POSITIVE ) ? 1 : -1;
-
-            _symbolicLbOfLb[i] = constant;
-            _symbolicUbOfLb[i] = constant;
-            _symbolicLbOfUb[i] = constant;
-            _symbolicUbOfUb[i] = constant;
-
-            for ( unsigned j = 0; j < _inputLayerSize; ++j )
-            {
-                _symbolicUb[j * _size + i] = 0;
-                _symbolicLb[j * _size + i] = 0;
-            }
-
-            _symbolicLowerBias[i] = constant;
-            _symbolicUpperBias[i] = constant;
-        }
-
-        if ( _symbolicLbOfLb[i] < -1 )
-            _symbolicLbOfLb[i] = -1;
-        if ( _symbolicUbOfUb[i] > 1 )
-            _symbolicUbOfUb[i] = 1;
-
-        /*
-          We now have the tightest bounds we can for the sign
-          variable. If they are tighter than what was previously
-          known, store them.
-        */
-        if ( _lb[i] < _symbolicLbOfLb[i] )
-        {
-            _lb[i] = _symbolicLbOfLb[i];
-            _layerOwner->receiveTighterBound(
-                Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
-        }
-
-        if ( _ub[i] > _symbolicUbOfUb[i] )
-        {
-            _ub[i] = _symbolicUbOfUb[i];
-            _layerOwner->receiveTighterBound(
-                Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
-        }
-    }
-}
-
-void Layer::computeSymbolicBoundsForAbsoluteValue()
-{
-    std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 );
-    std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 );
-
-    for ( unsigned i = 0; i < _size; ++i )
-    {
-        if ( _eliminatedNeurons.exists( i ) )
-        {
-            _symbolicLowerBias[i] = _eliminatedNeurons[i];
-            _symbolicUpperBias[i] = _eliminatedNeurons[i];
-
-            _symbolicLbOfLb[i] = _eliminatedNeurons[i];
-            _symbolicUbOfLb[i] = _eliminatedNeurons[i];
-            _symbolicLbOfUb[i] = _eliminatedNeurons[i];
-            _symbolicUbOfUb[i] = _eliminatedNeurons[i];
-
-            continue;
-        }
-
-        PhaseStatus absPhase = PHASE_NOT_FIXED;
-
-        ASSERT( _neuronToActivationSources.exists( i ) );
-        NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin();
-        const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
-
-        unsigned sourceLayerSize = sourceLayer->getSize();
-        const double *sourceSymbolicLb = sourceLayer->getSymbolicLb();
-        const double *sourceSymbolicUb = sourceLayer->getSymbolicUb();
-
-        for ( unsigned j = 0; j < _inputLayerSize; ++j )
-        {
-            _symbolicLb[j * _size + i] =
-                sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron];
-            _symbolicUb[j * _size + i] =
-                sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron];
-        }
-
-        _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron];
-        _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron];
-
-        double sourceLb = sourceLayer->getLb( sourceIndex._neuron );
-        double sourceUb = sourceLayer->getUb( sourceIndex._neuron );
-
-        _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron );
-        _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron );
-        _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron );
-        _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron );
-
-        if ( sourceLb >= 0 )
-            absPhase = ABS_PHASE_POSITIVE;
-        else if ( sourceUb <= 0 )
-            absPhase = ABS_PHASE_NEGATIVE;
-
-        if ( absPhase == PHASE_NOT_FIXED )
-        {
-            // If we got here, we know that lbOfLb < 0 < ubOfUb. In this case,
-            // we do naive concretization: lb is 0, ub is the max between
-            // -lb and ub of the input neuron
-            for ( unsigned j = 0; j < _inputLayerSize; ++j )
-            {
-                _symbolicLb[j * _size + i] = 0;
-                _symbolicUb[j * _size + i] = 0;
-            }
-
-            _symbolicLowerBias[i] = 0;
-            _symbolicUpperBias[i] = FloatUtils::max( -sourceLb, sourceUb );
-
-            _symbolicLbOfLb[i] = 0;
-            _symbolicUbOfLb[i] = _symbolicUpperBias[i];
-            _symbolicLbOfUb[i] = 0;
-            _symbolicUbOfUb[i] = _symbolicUpperBias[i];
-        }
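
When the source is entirely non-positive, |x| = -x, so an inherited bound pair lb(x) <= x <= ub(x) becomes -ub(x) <= |x| <= -lb(x). That swap-and-negate is exactly what the negative-phase branch below performs; a tiny check with constant hypothetical bounds (editorial sketch):

    #include <cassert>

    int main()
    {
        double lb = -5, ub = -2; // hypothetical bounds on x, with x <= 0
        double newLb = -ub;      // 2 <= |x|
        double newUb = -lb;      // |x| <= 5
        assert( newLb == 2 && newUb == 5 );
        return 0;
    }
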
-        else
-        {
-            // The phase of this AbsoluteValueConstraint is fixed!
-            if ( absPhase == ABS_PHASE_POSITIVE )
-            {
-                // Positive AbsoluteValue, bounds are propagated as is
-            }
-            else
-            {
-                // Negative AbsoluteValue, bounds are negated and flipped
-                double temp;
-                for ( unsigned j = 0; j < _inputLayerSize; ++j )
-                {
-                    temp = _symbolicUb[j * _size + i];
-                    _symbolicUb[j * _size + i] = -_symbolicLb[j * _size + i];
-                    _symbolicLb[j * _size + i] = -temp;
-                }
-
-                temp = _symbolicLowerBias[i];
-                _symbolicLowerBias[i] = -_symbolicUpperBias[i];
-                _symbolicUpperBias[i] = -temp;
-
-                // Old lb, negated, is the new ub
-                temp = _symbolicLbOfLb[i];
-                _symbolicLbOfLb[i] = -_symbolicUbOfUb[i];
-                _symbolicUbOfUb[i] = -temp;
-
-                temp = _symbolicUbOfLb[i];
-                _symbolicUbOfLb[i] = -_symbolicLbOfUb[i];
-                _symbolicLbOfUb[i] = -temp;
-            }
-        }
-
-        // In extreme cases (constraint set externally), _symbolicLbOfLb
-        // could be negative - so adjust this
-        if ( _symbolicLbOfLb[i] < 0 )
-            _symbolicLbOfLb[i] = 0;
-
-        /*
-          We now have the tightest bounds we can for the abs
-          variable. If they are tighter than what was previously
-          known, store them.
-        */
-        if ( _lb[i] < _symbolicLbOfLb[i] )
-        {
-            _lb[i] = _symbolicLbOfLb[i];
-            _layerOwner->receiveTighterBound(
-                Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
-        }
-
-        if ( _ub[i] > _symbolicUbOfUb[i] )
-        {
-            _ub[i] = _symbolicUbOfUb[i];
-            _layerOwner->receiveTighterBound(
-                Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
-        }
-    }
-}
-
-void Layer::computeSymbolicBoundsForLeakyRelu()
-{
-    ASSERT( _alpha > 0 && _alpha < 1 );
-
-    std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 );
-    std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 );
-
-    for ( unsigned i = 0; i < _size; ++i )
-    {
-        if ( _eliminatedNeurons.exists( i ) )
-        {
-            _symbolicLowerBias[i] = _eliminatedNeurons[i];
-            _symbolicUpperBias[i] = _eliminatedNeurons[i];
-
-            _symbolicLbOfLb[i] = _eliminatedNeurons[i];
-            _symbolicUbOfLb[i] = _eliminatedNeurons[i];
-            _symbolicLbOfUb[i] = _eliminatedNeurons[i];
-            _symbolicUbOfUb[i] = _eliminatedNeurons[i];
-        }
-    }
-
-    for ( unsigned i = 0; i < _size; ++i )
-    {
-        if ( _eliminatedNeurons.exists( i ) )
-            continue;
-
-        /*
-          There are two ways we can determine that a LeakyReLU has become fixed:
-
-          1. If the LeakyReLU's variable has been externally fixed
-          2. lbLb >= 0 (ACTIVE) or ubUb <= 0 (INACTIVE)
-        */
-        PhaseStatus leakyReluPhase = PHASE_NOT_FIXED;
-
-        // Has the f variable been eliminated or fixed?
-        if ( FloatUtils::isPositive( _lb[i] ) )
-            leakyReluPhase = RELU_PHASE_ACTIVE;
-        else if ( FloatUtils::isZero( _ub[i] ) )
-            leakyReluPhase = RELU_PHASE_INACTIVE;
-
-        ASSERT( _neuronToActivationSources.exists( i ) );
-        NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin();
-        const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
-
-        /*
-          A LeakyReLU initially "inherits" the symbolic bounds computed
-          for its input variable
-        */
-        unsigned sourceLayerSize = sourceLayer->getSize();
-        const double *sourceSymbolicLb = sourceLayer->getSymbolicLb();
-        const double *sourceSymbolicUb = sourceLayer->getSymbolicUb();
-
-        for ( unsigned j = 0; j < _inputLayerSize; ++j )
-        {
-            _symbolicLb[j * _size + i] =
-                sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron];
-            _symbolicUb[j * _size + i] =
-                sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron];
-        }
-        _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron];
-        _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron];
-
-        double sourceLb = sourceLayer->getLb( sourceIndex._neuron );
-        double sourceUb = sourceLayer->getUb( sourceIndex._neuron );
-
-        _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron );
-        _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron );
-        _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron );
-        _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron );
-
-        // Has the b variable been fixed?
-        if ( !FloatUtils::isNegative( sourceLb ) )
-        {
-            leakyReluPhase = RELU_PHASE_ACTIVE;
-        }
-        else if ( !FloatUtils::isPositive( sourceUb ) )
-        {
-            leakyReluPhase = RELU_PHASE_INACTIVE;
-        }
-
-        if ( leakyReluPhase == PHASE_NOT_FIXED )
-        {
-            // LeakyReLU not fixed
-            // Symbolic upper bound:
-            //   x_f <= ( ( u - alpha * l ) / ( u - l ) ) * x_b + ( ( alpha - 1 ) * u * l ) / ( u - l )
-            // Concrete upper bound: x_f <= ub_b
-            double width = sourceUb - sourceLb;
-            double weight = ( sourceUb - _alpha * sourceLb ) / width;
-
-            if ( _alpha <= 1 )
-            {
-                for ( unsigned j = 0; j < _inputLayerSize; ++j )
-                {
-                    _symbolicUb[j * _size + i] *= weight;
-                }
-
-                // Do the same for the bias, and then adjust
-                _symbolicUpperBias[i] *= weight;
-                _symbolicUpperBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width;
-
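
Endpoint check for the LeakyReLU secant just applied: with l < 0 < u, slope ( u - alpha * l ) / ( u - l ) and bias ( alpha - 1 ) * u * l / ( u - l ), the line meets the function at both corners ( l, alpha * l ) and ( u, u ). A minimal editorial sketch with hypothetical numbers:

    #include <cassert>
    #include <cmath>

    int main()
    {
        double alpha = 0.1, l = -3, u = 5, width = u - l;
        double slope = ( u - alpha * l ) / width;
        double bias = ( ( alpha - 1 ) * u * l ) / width;
        assert( std::fabs( slope * l + bias - alpha * l ) < 1e-9 );
        assert( std::fabs( slope * u + bias - u ) < 1e-9 );
        return 0;
    }
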
-                // For the lower bound, in general, x_f >= lambda * x_b, where
-                // alpha <= lambda <= 1, would be a sound lower bound. We
-                // use the heuristic described in section 4.1 of
-                // https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf
-                // to set the value of lambda (either alpha or 1 is considered).
-                if ( sourceUb > -sourceLb )
-                {
-                    // lambda = 1
-                    // Symbolic lower bound: x_f >= x_b
-                    // Concrete lower bound: x_f >= sourceLb
-
-                    // Lower bounds are passed as is
-                }
-                else
-                {
-                    // lambda = alpha
-                    // Symbolic lower bound: x_f >= alpha * x_b
-                    // Concrete lower bound: x_f >= alpha * sourceLb
-                    for ( unsigned j = 0; j < _inputLayerSize; ++j )
-                    {
-                        _symbolicLb[j * _size + i] *= _alpha;
-                    }
-
-                    _symbolicLowerBias[i] *= _alpha;
-                }
-            }
-            else
-            {
-                for ( unsigned j = 0; j < _inputLayerSize; ++j )
-                {
-                    _symbolicLb[j * _size + i] *= weight;
-                }
-
-                // Do the same for the bias, and then adjust
-                _symbolicLowerBias[i] *= weight;
-                _symbolicLowerBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width;
-
-                if ( sourceUb > -sourceLb )
-                {
-                    // Upper bounds are passed as is
-                }
-                else
-                {
-                    for ( unsigned j = 0; j < _inputLayerSize; ++j )
-                    {
-                        _symbolicUb[j * _size + i] *= _alpha;
-                    }
-
-                    _symbolicUpperBias[i] *= _alpha;
-                }
-            }
-
-            /*
-              We now have the symbolic representation for the current
-              layer. Next, we compute new lower and upper bounds for
-              it. For each of these bounds, we compute an upper bound and
-              a lower bound.
-            */
-            _symbolicLbOfLb[i] = _symbolicLowerBias[i];
-            _symbolicUbOfLb[i] = _symbolicLowerBias[i];
-            _symbolicLbOfUb[i] = _symbolicUpperBias[i];
-            _symbolicUbOfUb[i] = _symbolicUpperBias[i];
-
-            for ( unsigned j = 0; j < _inputLayerSize; ++j )
-            {
-                double inputLb = _layerOwner->getLayer( 0 )->getLb( j );
-                double inputUb = _layerOwner->getLayer( 0 )->getUb( j );
-
-                double entry = _symbolicLb[j * _size + i];
-
-                if ( entry >= 0 )
-                {
-                    _symbolicLbOfLb[i] += ( entry * inputLb );
-                    _symbolicUbOfLb[i] += ( entry * inputUb );
-                }
-                else
-                {
-                    _symbolicLbOfLb[i] += ( entry * inputUb );
-                    _symbolicUbOfLb[i] += ( entry * inputLb );
-                }
-
-                entry = _symbolicUb[j * _size + i];
-
-                if ( entry >= 0 )
-                {
-                    _symbolicLbOfUb[i] += ( entry * inputLb );
-                    _symbolicUbOfUb[i] += ( entry * inputUb );
-                }
-                else
-                {
-                    _symbolicLbOfUb[i] += ( entry * inputUb );
-                    _symbolicUbOfUb[i] += ( entry * inputLb );
-                }
-            }
-        }
-        else
-        {
-            // The phase of this LeakyReLU is fixed!
-            if ( leakyReluPhase == RELU_PHASE_ACTIVE )
-            {
-                // Positive LeakyReLU, bounds are propagated as is
-            }
-            else
-            {
-                // Negative LeakyReLU, bounds are multiplied by _alpha
-                _symbolicLbOfLb[i] *= _alpha;
-                _symbolicUbOfLb[i] *= _alpha;
-                _symbolicLbOfUb[i] *= _alpha;
-                _symbolicUbOfUb[i] *= _alpha;
-
-                for ( unsigned j = 0; j < _inputLayerSize; ++j )
-                {
-                    _symbolicUb[j * _size + i] *= _alpha;
-                    _symbolicLb[j * _size + i] *= _alpha;
-                }
-
-                _symbolicLowerBias[i] *= _alpha;
-                _symbolicUpperBias[i] *= _alpha;
-            }
-        }
-
-        if ( _symbolicUbOfUb[i] > sourceUb )
-            _symbolicUbOfUb[i] = sourceUb;
-        if ( _symbolicLbOfLb[i] < _alpha * sourceLb )
-            _symbolicLbOfLb[i] = _alpha * sourceLb;
-
-        /*
-          We now have the tightest bounds we can for the LeakyReLU
-          variable. If they are tighter than what was previously
-          known, store them.
-        */
-        if ( _lb[i] < _symbolicLbOfLb[i] )
-        {
-            _lb[i] = _symbolicLbOfLb[i];
-            _layerOwner->receiveTighterBound(
-                Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
-        }
-
-        if ( _ub[i] > _symbolicUbOfUb[i] )
-        {
-            _ub[i] = _symbolicUbOfUb[i];
-            _layerOwner->receiveTighterBound(
-                Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
-        }
-    }
-}
-
-void Layer::computeSymbolicBoundsForSigmoid()
-{
-    std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 );
-    std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 );
-
-    for ( unsigned i = 0; i < _size; ++i )
-    {
-        if ( _eliminatedNeurons.exists( i ) )
-        {
-            _symbolicLowerBias[i] = _eliminatedNeurons[i];
-            _symbolicUpperBias[i] = _eliminatedNeurons[i];
-
-            _symbolicLbOfLb[i] = _eliminatedNeurons[i];
-            _symbolicUbOfLb[i] = _eliminatedNeurons[i];
-            _symbolicLbOfUb[i] = _eliminatedNeurons[i];
-            _symbolicUbOfUb[i] = _eliminatedNeurons[i];
-        }
-    }
-
-    for ( unsigned i = 0; i < _size; ++i )
-    {
-        if ( _eliminatedNeurons.exists( i ) )
-            continue;
-
-        ASSERT( _neuronToActivationSources.exists( i ) );
-        NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin();
-        const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
-
-        /*
-          A Sigmoid initially "inherits" the symbolic bounds computed
-          for its input variable
-        */
-        unsigned sourceLayerSize = sourceLayer->getSize();
-        const double *sourceSymbolicLb = sourceLayer->getSymbolicLb();
-        const double *sourceSymbolicUb = sourceLayer->getSymbolicUb();
-
-        for ( unsigned j = 0; j < _inputLayerSize; ++j )
-        {
-            _symbolicLb[j * _size + i] =
-                sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron];
-            _symbolicUb[j * _size + i] =
-                sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron];
-        }
-        _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron];
-        _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron];
-
-        double sourceLb = sourceLayer->getLb( sourceIndex._neuron );
-        double sourceUb = sourceLayer->getUb( sourceIndex._neuron );
-
-        _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron );
-        _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron );
-        _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron );
-        _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron );
-
-        // Bounds of lb, ub are the Sigmoids of source lb, ub
-        double sourceUbSigmoid = SigmoidConstraint::sigmoid( sourceUb );
-        double sourceLbSigmoid = SigmoidConstraint::sigmoid( sourceLb );
-
-        // Case when the Sigmoid constraint is fixed
-        if ( FloatUtils::areEqual( sourceLb, sourceUb ) )
-        {
-            for ( unsigned j = 0; j < _inputLayerSize; ++j )
-            {
-                _symbolicLb[j * _size + i] = 0;
-                _symbolicUb[j * _size + i] = 0;
-            }
-
-            _symbolicLbOfUb[i] = sourceUbSigmoid;
-            _symbolicUbOfUb[i] = sourceUbSigmoid;
-            _symbolicLbOfLb[i] = sourceLbSigmoid;
-            _symbolicUbOfLb[i] = sourceLbSigmoid;
-
-            _symbolicUpperBias[i] = sourceUbSigmoid;
-            _symbolicLowerBias[i] = sourceLbSigmoid;
-        }
-
-        // Sigmoid not fixed
-        else
-        {
-            double lambda = ( _ub[i] - _lb[i] ) / ( sourceUb - sourceLb );
-            double lambdaPrime = std::min( SigmoidConstraint::sigmoidDerivative( sourceLb ),
-                                           SigmoidConstraint::sigmoidDerivative( sourceUb ) );
-
-            // Update lower bound
-            if ( FloatUtils::isPositive( sourceLb ) )
-            {
-                for ( unsigned j = 0; j < _inputLayerSize; ++j )
-                {
-                    _symbolicLb[j * _size + i] *= lambda;
-                }
-
-                // Do the same for the bias, and then adjust
-                _symbolicLowerBias[i] *= lambda;
-                _symbolicLowerBias[i] += sourceLbSigmoid - lambda * sourceLb;
-            }
-            else
-            {
-                for ( unsigned j = 0; j < _inputLayerSize; ++j )
-                {
-                    _symbolicLb[j * _size + i] *= lambdaPrime;
-                }
-
-                // Do the same for the bias, and then adjust
-                _symbolicLowerBias[i] *= lambdaPrime;
-                _symbolicLowerBias[i] += sourceLbSigmoid - lambdaPrime * sourceLb;
-            }
-
-            // Update upper bound
-            if ( !FloatUtils::isPositive( sourceUb ) )
-            {
-                for ( unsigned j = 0; j < _inputLayerSize; ++j )
-                {
-                    _symbolicUb[j * _size + i] *= lambda;
-                }
-
-                // Do the same for the bias, and then adjust
-                _symbolicUpperBias[i] *= lambda;
-                _symbolicUpperBias[i] += sourceUbSigmoid - lambda * sourceUb;
-            }
-            else
-            {
-                for ( unsigned j = 0; j < _inputLayerSize; ++j )
-                {
-                    _symbolicUb[j * _size + i] *= lambdaPrime;
-                }
-
-                // Do the same for the bias, and then adjust
-                _symbolicUpperBias[i] *= lambdaPrime;
-                _symbolicUpperBias[i] += sourceUbSigmoid - lambdaPrime * sourceUb;
-            }
-
-            /*
-              We now have the symbolic representation for the current
-              layer. Next, we compute new lower and upper bounds for
-              it. For each of these bounds, we compute an upper bound and
-              a lower bound.
-            */
-            _symbolicLbOfLb[i] = _symbolicLowerBias[i];
-            _symbolicUbOfLb[i] = _symbolicLowerBias[i];
-            _symbolicLbOfUb[i] = _symbolicUpperBias[i];
-            _symbolicUbOfUb[i] = _symbolicUpperBias[i];
-
-            for ( unsigned j = 0; j < _inputLayerSize; ++j )
-            {
-                double inputLb = _layerOwner->getLayer( 0 )->getLb( j );
-                double inputUb = _layerOwner->getLayer( 0 )->getUb( j );
-
-                double entry = _symbolicLb[j * _size + i];
-
-                if ( entry >= 0 )
-                {
-                    _symbolicLbOfLb[i] += ( entry * inputLb );
-                    _symbolicUbOfLb[i] += ( entry * inputUb );
-                }
-                else
-                {
-                    _symbolicLbOfLb[i] += ( entry * inputUb );
-                    _symbolicUbOfLb[i] += ( entry * inputLb );
-                }
-
-                entry = _symbolicUb[j * _size + i];
-
-                if ( entry >= 0 )
-                {
-                    _symbolicLbOfUb[i] += ( entry * inputLb );
-                    _symbolicUbOfUb[i] += ( entry * inputUb );
-                }
-                else
-                {
-                    _symbolicLbOfUb[i] += ( entry * inputUb );
-                    _symbolicUbOfUb[i] += ( entry * inputLb );
-                }
-            }
-        }
-
-        if ( _symbolicLbOfLb[i] < -1 )
-            _symbolicLbOfLb[i] = -1;
-        if ( _symbolicUbOfUb[i] > 1 )
-            _symbolicUbOfUb[i] = 1;
-
-        /*
-          We now have the tightest bounds we can for the sigmoid
-          variable. If they are tighter than what was previously
-          known, store them.
-        */
-        if ( _lb[i] < _symbolicLbOfLb[i] )
-        {
-            _lb[i] = _symbolicLbOfLb[i];
-            _layerOwner->receiveTighterBound(
-                Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
-        }
-
-        if ( _ub[i] > _symbolicUbOfUb[i] )
-        {
-            _ub[i] = _symbolicUbOfUb[i];
-            _layerOwner->receiveTighterBound(
-                Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
-        }
-    }
-}
-
-void Layer::computeSymbolicBoundsForRound()
-{
-    std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 );
-    std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 );
-
-    for ( unsigned i = 0; i < _size; ++i )
-    {
-        if ( _eliminatedNeurons.exists( i ) )
-        {
-            _symbolicLowerBias[i] = _eliminatedNeurons[i];
-            _symbolicUpperBias[i] = _eliminatedNeurons[i];
-
-            _symbolicLbOfLb[i] = _eliminatedNeurons[i];
-            _symbolicUbOfLb[i] = _eliminatedNeurons[i];
-            _symbolicLbOfUb[i] = _eliminatedNeurons[i];
-            _symbolicUbOfUb[i] = _eliminatedNeurons[i];
-        }
-    }
-
-    for ( unsigned i = 0; i < _size; ++i )
-    {
-        if ( _eliminatedNeurons.exists( i ) )
-            continue;
-
-        ASSERT( _neuronToActivationSources.exists( i ) );
-        NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin();
-        const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
-
-        /*
-          A Round initially "inherits" the symbolic bounds computed
-          for its input variable
-        */
-        unsigned sourceLayerSize = sourceLayer->getSize();
-        const double *sourceSymbolicLb = sourceLayer->getSymbolicLb();
-        const double *sourceSymbolicUb = sourceLayer->getSymbolicUb();
-
-        for ( unsigned j = 0; j < _inputLayerSize; ++j )
-        {
-            _symbolicLb[j * _size + i] =
-                sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron];
-            _symbolicUb[j * _size + i] =
-                sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron];
-        }
-        _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron];
-        _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron];
-
-        double sourceLb = sourceLayer->getLb( sourceIndex._neuron );
-        double sourceUb = sourceLayer->getUb( sourceIndex._neuron );
-
-        // Bounds of lb, ub are the rounded values of source lb, ub
-        double sourceUbRound = FloatUtils::round( sourceUb );
-        double sourceLbRound = FloatUtils::round( sourceLb );
-
-        _symbolicLbOfUb[i] = sourceUbRound;
-        _symbolicUbOfUb[i] = sourceUbRound;
-        _symbolicLbOfLb[i] = sourceLbRound;
-        _symbolicUbOfLb[i] = sourceLbRound;
-
-        // Case when the Round constraint is fixed
-        if ( FloatUtils::areEqual( sourceUbRound, sourceLbRound ) )
-        {
-            for ( unsigned j = 0; j < _inputLayerSize; ++j )
-            {
-                _symbolicUb[j * _size + i] = 0;
-                _symbolicLb[j * _size + i] = 0;
-            }
-
-            _symbolicUpperBias[i] = sourceUbRound;
-            _symbolicLowerBias[i] = sourceLbRound;
-        }
-
-        // Round not fixed
-        else
-        {
-            // Symbolic upper bound: x_f <= x_b + 0.5
-            // Concrete upper bound: x_f <= round(ub_b)
-            _symbolicUpperBias[i] += 0.5;
-
-            // Symbolic lower bound: x_f >= x_b - 0.5
-            // Concrete lower bound: x_f >= round(lb_b)
-            _symbolicLowerBias[i] -= 0.5;
-        }
-
-        /*
-          We now have the tightest bounds we can for the round
-          variable. If they are tighter than what was previously
-          known, store them.
-        */
-        if ( _lb[i] < _symbolicLbOfLb[i] )
-        {
-            _lb[i] = _symbolicLbOfLb[i];
-            _layerOwner->receiveTighterBound(
-                Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
-        }
-
-        if ( _ub[i] > _symbolicUbOfUb[i] )
-        {
-            _ub[i] = _symbolicUbOfUb[i];
-            _layerOwner->receiveTighterBound(
-                Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
-        }
-    }
-}
-
-void Layer::computeSymbolicBoundsForMax()
-{
-    std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 );
-    std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 );
-
-    for ( unsigned i = 0; i < _size; ++i )
-    {
-        if ( _eliminatedNeurons.exists( i ) )
-        {
-            _symbolicLowerBias[i] = _eliminatedNeurons[i];
-            _symbolicUpperBias[i] = _eliminatedNeurons[i];
-
-            _symbolicLbOfLb[i] = _eliminatedNeurons[i];
-            _symbolicUbOfLb[i] = _eliminatedNeurons[i];
-            _symbolicLbOfUb[i] = _eliminatedNeurons[i];
-            _symbolicUbOfUb[i] = _eliminatedNeurons[i];
-        }
-    }
-
-    for ( unsigned i = 0; i < _size; ++i )
-    {
-        if ( _eliminatedNeurons.exists( i ) )
-            continue;
-
-        ASSERT( _neuronToActivationSources.exists( i ) );
-        List<NeuronIndex> sources = getActivationSources( i );
-        const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer );
-
-        unsigned sourceLayerSize = sourceLayer->getSize();
-        const double *sourceSymbolicLb = sourceLayer->getSymbolicLb();
-        const double *sourceSymbolicUb = sourceLayer->getSymbolicUb();
-
-        NeuronIndex indexOfMaxLowerBound = *( sources.begin() );
-        double maxLowerBound = FloatUtils::negativeInfinity();
-        double maxUpperBound = FloatUtils::negativeInfinity();
-
-        Map<NeuronIndex, double> sourceLbs;
-        Map<NeuronIndex, double> sourceUbs;
-        for ( const auto &sourceIndex : sources )
-        {
-            unsigned sourceNeuron = sourceIndex._neuron;
-            double sourceLb = sourceLayer->getLb( sourceNeuron );
-            double sourceUb = sourceLayer->getUb( sourceNeuron );
-
-            sourceLbs[sourceIndex] = sourceLb;
-            sourceUbs[sourceIndex] = sourceUb;
-
-            if ( maxLowerBound < sourceLb )
-            {
-                indexOfMaxLowerBound = sourceIndex;
-                maxLowerBound = sourceLb;
-            }
-            if ( maxUpperBound < sourceUb )
-            {
-                maxUpperBound = sourceUb;
-            }
-        }
-
-        // The phase is fixed if the lower bound of a source variable x_b is
-        // larger than the upper bounds of all other source variables.
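
When no single source dominates, the code below keeps the best lower bound's symbolic row as the lower bound and falls back to the constant maxUpperBound as the upper bound. Illustrated with three hypothetical source intervals (editorial sketch):

    #include <algorithm>
    #include <cstdio>

    // No lower bound dominates all other upper bounds here, so the layer
    // keeps max(lb) as the concrete lower bound and max(ub) as a constant
    // upper bound.
    int main()
    {
        double lbs[] = { 1, -2, 0 };
        double ubs[] = { 3, 2, 2 };
        double maxLb = *std::max_element( lbs, lbs + 3 ); // 1
        double maxUb = *std::max_element( ubs, ubs + 3 ); // 3
        printf( "%g <= max <= %g\n", maxLb, maxUb );      // 1 <= max <= 3
        return 0;
    }
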
-        bool phaseFixed = true;
-        for ( const auto &sourceIndex : sources )
-        {
-            if ( sourceIndex != indexOfMaxLowerBound &&
-                 FloatUtils::gt( sourceUbs[sourceIndex], maxLowerBound ) )
-            {
-                phaseFixed = false;
-                break;
-            }
-        }
-
-        if ( phaseFixed )
-        {
-            // Phase fixed
-            // Symbolic bound: x_b <= x_f <= x_b
-            // Concrete bound: lb_b <= x_f <= ub_b
-            for ( unsigned j = 0; j < _inputLayerSize; ++j )
-            {
-                _symbolicLb[j * _size + i] =
-                    sourceSymbolicLb[j * sourceLayerSize + indexOfMaxLowerBound._neuron];
-                _symbolicUb[j * _size + i] =
-                    sourceSymbolicUb[j * sourceLayerSize + indexOfMaxLowerBound._neuron];
-            }
-            _symbolicLowerBias[i] =
-                sourceLayer->getSymbolicLowerBias()[indexOfMaxLowerBound._neuron];
-            _symbolicUpperBias[i] =
-                sourceLayer->getSymbolicUpperBias()[indexOfMaxLowerBound._neuron];
-
-            _symbolicLbOfLb[i] = maxLowerBound;
-            _symbolicUbOfLb[i] = maxLowerBound;
-            _symbolicLbOfUb[i] = sourceUbs[indexOfMaxLowerBound];
-            _symbolicUbOfUb[i] = sourceUbs[indexOfMaxLowerBound];
-        }
-        else
-        {
-            // MaxPool not fixed
-            // Symbolic bounds: x_b <= x_f <= maxUpperBound
-            // Concrete bounds: lb_b <= x_f <= maxUpperBound
-            for ( unsigned j = 0; j < _inputLayerSize; ++j )
-            {
-                _symbolicLb[j * _size + i] =
-                    sourceSymbolicLb[j * sourceLayerSize + indexOfMaxLowerBound._neuron];
-                _symbolicUb[j * _size + i] = 0;
-            }
-            _symbolicLowerBias[i] =
-                sourceLayer->getSymbolicLowerBias()[indexOfMaxLowerBound._neuron];
-            _symbolicUpperBias[i] = maxUpperBound;
-
-            _symbolicLbOfLb[i] = maxLowerBound;
-            _symbolicUbOfLb[i] = maxLowerBound;
-            _symbolicLbOfUb[i] = maxUpperBound;
-            _symbolicUbOfUb[i] = maxUpperBound;
-        }
-
-        /*
-          We now have the tightest bounds we can for the max
-          variable. If they are tighter than what was previously
-          known, store them.
-        */
-        if ( _lb[i] < _symbolicLbOfLb[i] )
-        {
-            _lb[i] = _symbolicLbOfLb[i];
-            _layerOwner->receiveTighterBound(
-                Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
-        }
-
-        if ( _ub[i] > _symbolicUbOfUb[i] )
-        {
-            _ub[i] = _symbolicUbOfUb[i];
-            _layerOwner->receiveTighterBound(
-                Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
-        }
-    }
-}
-
-void Layer::computeSymbolicBoundsForSoftmax()
-{
-    std::fill_n( _symbolicLowerBias, _size, 0 );
-    std::fill_n( _symbolicUpperBias, _size, 0 );
-
-    double *symbolicLb = new double[_size * _size];
-    double *symbolicUb = new double[_size * _size];
-    std::fill_n( symbolicLb, _size * _size, 0 );
-    std::fill_n( symbolicUb, _size * _size, 0 );
-
-    double *_work = new double[_size * _size];
-    std::fill_n( _work, _size * _size, 0 );
-
-    Set<unsigned> handledInputNeurons;
-    unsigned sourceLayerSize = _size;
-    SoftmaxBoundType boundType = Options::get()->getSoftmaxBoundType();
-
-    for ( unsigned i = 0; i < _size; ++i )
-    {
-        if ( _eliminatedNeurons.exists( i ) )
-        {
-            _symbolicLowerBias[i] = _eliminatedNeurons[i];
-            _symbolicUpperBias[i] = _eliminatedNeurons[i];
-
-            _symbolicLbOfLb[i] = _eliminatedNeurons[i];
-            _symbolicUbOfLb[i] = _eliminatedNeurons[i];
-            _symbolicLbOfUb[i] = _eliminatedNeurons[i];
-            _symbolicUbOfUb[i] = _eliminatedNeurons[i];
-        }
-    }
-
-    for ( unsigned i = 0; i < _size; ++i )
-    {
-        if ( _eliminatedNeurons.exists( i ) )
-            continue;
-
-        ASSERT( _neuronToActivationSources.exists( i ) );
-        List<NeuronIndex> sources = getActivationSources( i );
-        const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer );
-
-        sourceLayerSize = sourceLayer->getSize();
-        ASSERT( sourceLayerSize == _size );
-
-        Vector<double> sourceLbs;
-        Vector<double> sourceUbs;
-        Vector<double> sourceMids;
-        Vector<double> targetLbs;
-        Vector<double> targetUbs;
-        unsigned len = 0;
-        for ( const auto &sourceIndex : sources )
-        {
-            unsigned sourceNeuron = sourceIndex._neuron;
-            double sourceLb = sourceLayer->getLb( sourceNeuron );
-            double sourceUb = sourceLayer->getUb( sourceNeuron );
-
-            sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS );
-            sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS );
-            sourceMids.append( ( sourceLb + sourceUb ) / 2 );
-            targetLbs.append( _lb[i] );
-            targetUbs.append( _ub[i] );
-
-            ++len;
-        }
-
-        // Find the index of i in the softmax
-        unsigned index = 0;
-        for ( const auto &sourceIndex : sources )
-        {
-            if ( handledInputNeurons.exists( sourceIndex._neuron ) )
-                ++index;
-            else
-            {
-                handledInputNeurons.insert( sourceIndex._neuron );
-                break;
-            }
-        }
-
-        double lb = linearLowerBound( sourceLbs, sourceUbs, index );
-        double ub = linearUpperBound( sourceLbs, sourceUbs, index );
-        if ( _lb[i] < lb )
-        {
-            _lb[i] = lb;
-            _layerOwner->receiveTighterBound(
-                Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
-        }
-        if ( _ub[i] > ub )
-        {
-            _ub[i] = ub;
-            _layerOwner->receiveTighterBound(
-                Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
-        }
-        targetLbs[index] = _lb[i];
-        targetUbs[index] = _ub[i];
-
-        if ( FloatUtils::areEqual( _lb[i], _ub[i] ) )
-        {
-            _symbolicLowerBias[i] = _lb[i];
-            _symbolicUpperBias[i] = _ub[i];
-            for ( const auto &sourceIndex : sources )
-            {
-                symbolicLb[len * sourceIndex._neuron + i] = 0;
-                symbolicUb[len * sourceIndex._neuron + i] = 0;
-            }
-        }
-        else
-        {
-            // Compute symbolic bound
-            if ( boundType == SoftmaxBoundType::LOG_SUM_EXP_DECOMPOSITION )
-            {
-                bool useLSE2 = false;
-                for ( const auto &lb : targetLbs )
-                {
-                    if ( lb > GlobalConfiguration::SOFTMAX_LSE2_THRESHOLD )
-                        useLSE2 = true;
-                }
-                unsigned inputIndex = 0;
-                if ( !useLSE2 )
-                {
-                    _symbolicLowerBias[i] =
-                        LSELowerBound( sourceMids, sourceLbs, sourceUbs, index );
-                    for ( const auto &sourceIndex : sources )
-                    {
-                        double dldj =
-                            dLSELowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex );
-                        symbolicLb[len * sourceIndex._neuron + i] = dldj;
-                        _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex];
-                        ++inputIndex;
-                    }
-                }
-                else
-                {
-                    _symbolicLowerBias[i] =
-                        LSELowerBound2( sourceMids, sourceLbs, sourceUbs, index );
-                    for ( const auto &sourceIndex : sources )
-                    {
-                        double dldj =
-                            dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index, inputIndex );
-                        symbolicLb[len * sourceIndex._neuron + i] = dldj;
-                        _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex];
-                        ++inputIndex;
-                    }
-                }
-
-                _symbolicUpperBias[i] = LSEUpperBound( sourceMids, targetLbs, targetUbs, index );
-                inputIndex = 0;
-                for ( const auto &sourceIndex : sources )
-                {
-                    double dudj =
-                        dLSEUpperbound( sourceMids, targetLbs, targetUbs, index, inputIndex );
-                    symbolicUb[len * sourceIndex._neuron + i] = dudj;
-                    _symbolicUpperBias[i] -= dudj * sourceMids[inputIndex];
-                    ++inputIndex;
-                }
-            }
-            else if ( boundType == SoftmaxBoundType::EXPONENTIAL_RECIPROCAL_DECOMPOSITION )
-            {
-                _symbolicLowerBias[i] = ERLowerBound( sourceMids, sourceLbs, sourceUbs, index );
-                unsigned inputIndex = 0;
-                for ( const auto &sourceIndex : sources )
-                {
-                    double dldj =
-                        dERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex );
-                    symbolicLb[len * sourceIndex._neuron + i] = dldj;
-                    _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex];
-                    ++inputIndex;
-                }
-
-                _symbolicUpperBias[i] = ERUpperBound( sourceMids, targetLbs, targetUbs, index );
-                inputIndex = 0;
-                for ( const auto &sourceIndex : sources )
-                {
-                    double dudj =
-                        dERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex );
-                    symbolicUb[len * sourceIndex._neuron + i] = dudj;
-                    _symbolicUpperBias[i] -= dudj * sourceMids[inputIndex];
-                    ++inputIndex;
-                }
-            }
-        }
-    }
-
-    for ( const auto &sourceLayerEntry : _sourceLayers )
-    {
-        const Layer *sourceLayer = _layerOwner->getLayer( sourceLayerEntry.first );
-
-        /*
-          Perform the multiplication
-
-          newUB = oldUB * posWeights + oldLB * negWeights
-          newLB = oldUB * negWeights + oldLB * posWeights
-        */
-
-        for ( unsigned i = 0; i < sourceLayerSize * _size; ++i )
-        {
-            if ( symbolicLb[i] > 0 )
-                _work[i] = symbolicLb[i];
-            else
-                _work[i] = 0;
-        }
-        // _work is now positive weights in symbolicLb
-        matrixMultiplication( sourceLayer->getSymbolicLb(),
-                              _work,
-                              _symbolicLb,
-                              _inputLayerSize,
-                              sourceLayerSize,
-                              _size );
-        if ( sourceLayer->getSymbolicLowerBias() )
-            matrixMultiplication( sourceLayer->getSymbolicLowerBias(),
-                                  _work,
-                                  _symbolicLowerBias,
-                                  1,
-                                  sourceLayerSize,
-                                  _size );
-
-        for ( unsigned i = 0; i < sourceLayerSize * _size; ++i )
-        {
-            if ( symbolicLb[i] < 0 )
-                _work[i] = symbolicLb[i];
-            else
-                _work[i] = 0;
-        }
-        // _work is now negative weights in symbolicLb
-        matrixMultiplication( sourceLayer->getSymbolicUb(),
-                              _work,
-                              _symbolicLb,
-                              _inputLayerSize,
-                              sourceLayerSize,
-                              _size );
-        if ( sourceLayer->getSymbolicLowerBias() )
-            matrixMultiplication( sourceLayer->getSymbolicUpperBias(),
-                                  _work,
-                                  _symbolicLowerBias,
-                                  1,
-                                  sourceLayerSize,
-                                  _size );
-
-        for ( unsigned i = 0; i < sourceLayerSize * _size; ++i )
-        {
-            if ( symbolicUb[i] > 0 )
-                _work[i] = symbolicUb[i];
-            else
-                _work[i] = 0;
-        }
-        // _work is now positive weights in symbolicUb
-        matrixMultiplication( sourceLayer->getSymbolicUb(),
-                              _work,
-                              _symbolicUb,
-                              _inputLayerSize,
-                              sourceLayerSize,
-                              _size );
-        if ( sourceLayer->getSymbolicUpperBias() )
-            matrixMultiplication( sourceLayer->getSymbolicUpperBias(),
-                                  _work,
-                                  _symbolicUpperBias,
-                                  1,
-                                  sourceLayerSize,
-                                  _size );
-
-        for ( unsigned i = 0; i < sourceLayerSize * _size; ++i )
-        {
-            if ( symbolicUb[i] < 0 )
-                _work[i] = symbolicUb[i];
-            else
-                _work[i] = 0;
-        }
-        // _work is now negative weights in symbolicUb
-        matrixMultiplication( sourceLayer->getSymbolicLb(),
-                              _work,
-                              _symbolicUb,
-                              _inputLayerSize,
-                              sourceLayerSize,
-                              _size );
-        if ( sourceLayer->getSymbolicUpperBias() )
-            matrixMultiplication( sourceLayer->getSymbolicLowerBias(),
-                                  _work,
-                                  _symbolicUpperBias,
-                                  1,
-                                  sourceLayerSize,
-                                  _size );
-    }
-
-    /*
-      We now have the symbolic representation for the current
-      layer. Next, we compute new lower and upper bounds for
-      it. For each of these bounds, we compute an upper bound and
-      a lower bound.
-    */
-    for ( unsigned i = 0; i < _size; ++i )
-    {
-        if ( _eliminatedNeurons.exists( i ) )
-            continue;
-
-        _symbolicLbOfLb[i] = _symbolicLowerBias[i];
-        _symbolicUbOfLb[i] = _symbolicLowerBias[i];
-        _symbolicLbOfUb[i] = _symbolicUpperBias[i];
-        _symbolicUbOfUb[i] = _symbolicUpperBias[i];
-
-        for ( unsigned j = 0; j < _inputLayerSize; ++j )
-        {
-            double inputLb = _layerOwner->getLayer( 0 )->getLb( j );
-            double inputUb = _layerOwner->getLayer( 0 )->getUb( j );
-
-            double entry = _symbolicLb[j * _size + i];
-
-            if ( entry >= 0 )
-            {
-                _symbolicLbOfLb[i] += ( entry * inputLb );
-                _symbolicUbOfLb[i] += ( entry * inputUb );
-            }
-            else
-            {
-                _symbolicLbOfLb[i] += ( entry * inputUb );
-                _symbolicUbOfLb[i] += ( entry * inputLb );
-            }
-
-            entry = _symbolicUb[j * _size + i];
-
-            if ( entry >= 0 )
-            {
-                _symbolicLbOfUb[i] += ( entry * inputLb );
-                _symbolicUbOfUb[i] += ( entry * inputUb );
-            }
-            else
-            {
-                _symbolicLbOfUb[i] += ( entry * inputUb );
-                _symbolicUbOfUb[i] += ( entry * inputLb );
-            }
-        }
-    }
-
-    if ( symbolicLb )
-    {
-        delete[] symbolicLb;
-        symbolicLb = NULL;
-    }
-    if ( symbolicUb )
-    {
-        delete[] symbolicUb;
-        symbolicUb = NULL;
-    }
-    if ( _work )
-    {
-        delete[] _work;
-        _work = NULL;
-    }
-}
-
-void Layer::computeSymbolicBoundsForBilinear()
-{
-    std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 );
-    std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 );
-
-    for ( unsigned i = 0; i < _size; ++i )
-    {
-        if ( _eliminatedNeurons.exists( i ) )
-        {
-            _symbolicLowerBias[i] = _eliminatedNeurons[i];
-            _symbolicUpperBias[i] = _eliminatedNeurons[i];
-
-            _symbolicLbOfLb[i] = _eliminatedNeurons[i];
-            _symbolicUbOfLb[i] = _eliminatedNeurons[i];
-            _symbolicLbOfUb[i] = _eliminatedNeurons[i];
-            _symbolicUbOfUb[i] = _eliminatedNeurons[i];
-        }
-    }
-
-    for ( unsigned i = 0; i < _size; ++i )
-    {
-        if ( _eliminatedNeurons.exists( i ) )
-            continue;
-
-        ASSERT( _neuronToActivationSources.exists( i ) );
-        List<NeuronIndex> sources = getActivationSources( i );
-        ASSERT( sources.size() == 2 );
-
-        const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer );
-
-        unsigned sourceLayerSize = sourceLayer->getSize();
-        const double *sourceSymbolicLb = sourceLayer->getSymbolicLb();
-        const double *sourceSymbolicUb = sourceLayer->getSymbolicUb();
-
-        Vector<double> sourceLbs;
-        Vector<double> sourceUbs;
-        Vector<double> sourceValues;
-        bool allConstant = true;
-        unsigned indexA = 0;
-        unsigned indexB = 0;
-        unsigned counter = 0;
-        for ( const auto &sourceIndex : sources )
-        {
-            unsigned sourceNeuron = sourceIndex._neuron;
-            double sourceLb = sourceLayer->getLb( sourceNeuron );
-            double sourceUb = sourceLayer->getUb( sourceNeuron );
-
-            sourceLbs.append( sourceLb );
-            sourceUbs.append( sourceUb );
-
-            if ( !sourceLayer->neuronEliminated( sourceNeuron ) )
-            {
-                allConstant = false;
-            }
-            else
-            {
-                double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron );
-                sourceValues.append( sourceValue );
-            }
-
-            if ( counter == 0 )
-            {
-                indexA = sourceIndex._neuron;
-            }
-            else
-            {
-                indexB = sourceIndex._neuron;
-            }
-            ++counter;
-        }
-
-        if ( allConstant )
-        {
-            // If both source neurons have been eliminated, this neuron is constant
-            for ( unsigned j = 0; j < _inputLayerSize; ++j )
-            {
-                _symbolicUb[j * _size + i] = 0;
-                _symbolicLb[j * _size + i] = 0;
-            }
-
-            _symbolicUpperBias[i] = sourceValues[0] * sourceValues[1];
-            _symbolicLowerBias[i] = sourceValues[0] * sourceValues[1];
-            continue;
-        }
-
-        for ( unsigned j = 0; j < _inputLayerSize; ++j )
-        {
-            _symbolicUb[j * _size + i] = 0;
-            _symbolicLb[j * _size + i] = 0;
-        }
-
-        // Symbolic lower bound:
-        //   out >= alpha * x + beta * y + gamma
-        //   where alpha = lb_y, beta = lb_x, gamma = -lb_x * lb_y
-
-        // Symbolic upper bound:
-        //   out <= alpha * x + beta * y + gamma
-        //   where alpha = ub_y, beta = lb_x, gamma = -lb_x * ub_y
-        for ( unsigned j = 0; j < _inputLayerSize; ++j )
-        {
-            if ( sourceLbs[1] >= 0 )
-            {
-                _symbolicLb[j * _size + i] +=
-                    sourceLbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA];
-            }
-            else
-            {
-                _symbolicLb[j * _size + i] +=
-                    sourceLbs[1] * sourceSymbolicUb[j * sourceLayerSize + indexA];
-            }
-
-            if ( sourceUbs[1] >= 0 )
-            {
-                _symbolicUb[j * _size + i] +=
-                    sourceUbs[1] * sourceSymbolicUb[j * sourceLayerSize + indexA];
-            }
-            else
-            {
-                _symbolicUb[j * _size + i] +=
-                    sourceUbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA];
-            }
-
-            if ( sourceLbs[0] >= 0 )
-            {
-                _symbolicLb[j * _size + i] +=
-                    sourceLbs[0] * sourceSymbolicLb[j * sourceLayerSize + indexB];
-                _symbolicUb[j * _size + i] +=
-                    sourceLbs[0] * sourceSymbolicUb[j * sourceLayerSize + indexB];
-            }
-            else
-            {
-                _symbolicLb[j * _size + i] +=
-                    sourceLbs[0] * sourceSymbolicUb[j * sourceLayerSize + indexB];
-                _symbolicUb[j * _size + i] +=
-                    sourceLbs[0] * sourceSymbolicLb[j * sourceLayerSize + indexB];
-            }
-        }
-        _symbolicLowerBias[i] = -sourceLbs[0] * sourceLbs[1];
-        _symbolicUpperBias[i] = -sourceLbs[0] * sourceUbs[1];
-
-        double lb = FloatUtils::infinity();
-        double ub = FloatUtils::negativeInfinity();
-        List<double> values = { sourceLbs[0] * sourceLbs[1],
-                                sourceLbs[0] * sourceUbs[1],
-                                sourceUbs[0] * sourceLbs[1],
-                                sourceUbs[0] * sourceUbs[1] };
-        for ( const auto &v : values )
-        {
-            if ( v < lb )
-                lb = v;
-            if ( v > ub )
-                ub = v;
-        }
-
-        /*
-          We now have the symbolic representation for the current
-          layer. Next, we compute new lower and upper bounds for
-          it. For each of these bounds, we compute an upper bound and
-          a lower bound.
-        */
-        _symbolicLbOfLb[i] = _symbolicLowerBias[i];
-        _symbolicUbOfLb[i] = _symbolicLowerBias[i];
-        _symbolicLbOfUb[i] = _symbolicUpperBias[i];
-        _symbolicUbOfUb[i] = _symbolicUpperBias[i];
-
-        for ( unsigned j = 0; j < _inputLayerSize; ++j )
-        {
-            double inputLb = _layerOwner->getLayer( 0 )->getLb( j );
-            double inputUb = _layerOwner->getLayer( 0 )->getUb( j );
-
-            double entry = _symbolicLb[j * _size + i];
-
-            if ( entry >= 0 )
-            {
-                _symbolicLbOfLb[i] += ( entry * inputLb );
-                _symbolicUbOfLb[i] += ( entry * inputUb );
-            }
-            else
-            {
-                _symbolicLbOfLb[i] += ( entry * inputUb );
-                _symbolicUbOfLb[i] += ( entry * inputLb );
-            }
-
-            entry = _symbolicUb[j * _size + i];
-
-            if ( entry >= 0 )
-            {
-                _symbolicLbOfUb[i] += ( entry * inputLb );
-                _symbolicUbOfUb[i] += ( entry * inputUb );
-            }
-            else
-            {
-                _symbolicLbOfUb[i] += ( entry * inputUb );
-                _symbolicUbOfUb[i] += ( entry * inputLb );
-            }
-        }
-
-        /*
-          We now have the tightest bounds we can for the bilinear
-          variable. If they are tighter than what was previously
-          known, store them.
-        */
-        if ( _lb[i] < _symbolicLbOfLb[i] )
-        {
-            _lb[i] = _symbolicLbOfLb[i];
-            _layerOwner->receiveTighterBound(
-                Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
-        }
-
-        if ( _ub[i] > _symbolicUbOfUb[i] )
-        {
-            _ub[i] = _symbolicUbOfUb[i];
-            _layerOwner->receiveTighterBound(
-                Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
-        }
-    }
-}
-
-void Layer::computeSymbolicBoundsForWeightedSum()
-{
-    std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 );
-    std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 );
-
-    for ( unsigned i = 0; i < _size; ++i )
-    {
-        if ( _eliminatedNeurons.exists( i ) )
-        {
-            _symbolicLowerBias[i] = _eliminatedNeurons[i];
-            _symbolicUpperBias[i] = _eliminatedNeurons[i];
-
-            _symbolicLbOfLb[i] = _eliminatedNeurons[i];
-            _symbolicUbOfLb[i] = _eliminatedNeurons[i];
-            _symbolicLbOfUb[i] = _eliminatedNeurons[i];
-            _symbolicUbOfUb[i] = _eliminatedNeurons[i];
-        }
-        else
-        {
-            _symbolicLowerBias[i] = _bias[i];
-            _symbolicUpperBias[i] = _bias[i];
-        }
-    }
-
-    for ( const auto &sourceLayerEntry : _sourceLayers )
-    {
-        unsigned sourceLayerIndex = sourceLayerEntry.first;
-        unsigned sourceLayerSize = sourceLayerEntry.second;
-        const Layer *sourceLayer = _layerOwner->getLayer( sourceLayerEntry.first );
-
-        /*
-          Perform the multiplication
-
-          newUB = oldUB * posWeights + oldLB * negWeights
-          newLB = oldUB * negWeights + oldLB * posWeights
-        */
-
-        matrixMultiplication( sourceLayer->getSymbolicUb(),
-                              _layerToPositiveWeights[sourceLayerIndex],
-                              _symbolicUb,
-                              _inputLayerSize,
-                              sourceLayerSize,
-                              _size );
-        matrixMultiplication( sourceLayer->getSymbolicLb(),
-                              _layerToNegativeWeights[sourceLayerIndex],
-                              _symbolicUb,
-                              _inputLayerSize,
-                              sourceLayerSize,
-                              _size );
-        matrixMultiplication( sourceLayer->getSymbolicLb(),
-                              _layerToPositiveWeights[sourceLayerIndex],
-                              _symbolicLb,
-                              _inputLayerSize,
-                              sourceLayerSize,
-                              _size );
-        matrixMultiplication( sourceLayer->getSymbolicUb(),
-                              _layerToNegativeWeights[sourceLayerIndex],
-                              _symbolicLb,
-                              _inputLayerSize,
-                              sourceLayerSize,
-                              _size );
-
-        // Restore the zero bound on eliminated neurons
-        unsigned index;
-        for ( const auto &eliminated : _eliminatedNeurons )
-        {
-            for ( unsigned i = 0; i < _inputLayerSize; ++i )
-            {
-                index = i * _size + eliminated.first;
-                _symbolicLb[index] = 0;
-                _symbolicUb[index] = 0;
-            }
-        }
-
-        /*
-          Compute the biases for the new layer
-        */
-        for ( unsigned j = 0; j < _size; ++j )
-        {
-            if ( _eliminatedNeurons.exists( j ) )
-                continue;
-
-            // Add the weighted bias from the source layer
-            for ( unsigned k = 0; k < sourceLayerSize; ++k )
-            {
-                double weight = _layerToWeights[sourceLayerIndex][k * _size + j];
-
-                if ( weight > 0 )
-                {
-                    _symbolicLowerBias[j] += sourceLayer->getSymbolicLowerBias()[k] * weight;
-                    _symbolicUpperBias[j] += sourceLayer->getSymbolicUpperBias()[k] * weight;
-                }
-                else
-                {
-                    _symbolicLowerBias[j] += sourceLayer->getSymbolicUpperBias()[k] * weight;
-                    _symbolicUpperBias[j] += sourceLayer->getSymbolicLowerBias()[k] * weight;
-                }
-            }
-        }
-    }
-
-    /*
-      We now have the symbolic representation for the current
-      layer. Next, we compute new lower and upper bounds for
-      it. For each of these bounds, we compute an upper bound and
-      a lower bound.
-    */
-    for ( unsigned i = 0; i < _size; ++i )
-    {
-        if ( _eliminatedNeurons.exists( i ) )
-            continue;
-
-        _symbolicLbOfLb[i] = _symbolicLowerBias[i];
-        _symbolicUbOfLb[i] = _symbolicLowerBias[i];
-        _symbolicLbOfUb[i] = _symbolicUpperBias[i];
-        _symbolicUbOfUb[i] = _symbolicUpperBias[i];
-
-        for ( unsigned j = 0; j < _inputLayerSize; ++j )
-        {
-            double inputLb = _layerOwner->getLayer( 0 )->getLb( j );
-            double inputUb = _layerOwner->getLayer( 0 )->getUb( j );
-
-            double entry = _symbolicLb[j * _size + i];
-
-            if ( entry >= 0 )
-            {
-                _symbolicLbOfLb[i] += ( entry * inputLb );
-                _symbolicUbOfLb[i] += ( entry * inputUb );
-            }
-            else
-            {
-                _symbolicLbOfLb[i] += ( entry * inputUb );
-                _symbolicUbOfLb[i] += ( entry * inputLb );
-            }
-
-            entry = _symbolicUb[j * _size + i];
-
-            if ( entry >= 0 )
-            {
-                _symbolicLbOfUb[i] += ( entry * inputLb );
-                _symbolicUbOfUb[i] += ( entry * inputUb );
-            }
-            else
-            {
-                _symbolicLbOfUb[i] += ( entry * inputUb );
-                _symbolicUbOfUb[i] += ( entry * inputLb );
-            }
-        }
-
-        /*
-          We now have the tightest bounds we can for the
-          weighted sum variable. If they are tighter than
-          what was previously known, store them.
-        */
-        if ( _lb[i] < _symbolicLbOfLb[i] )
-        {
-            _lb[i] = _symbolicLbOfLb[i];
-            _layerOwner->receiveTighterBound(
-                Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
-        }
-
-        if ( _ub[i] > _symbolicUbOfUb[i] )
-        {
-            _ub[i] = _symbolicUbOfUb[i];
-            _layerOwner->receiveTighterBound(
-                Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
-        }
-    }
-}
-
-void Layer::computeParameterisedSymbolicBounds( const Vector<double> &coeffs, bool receive )
-{
-    switch ( _type )
-    {
-    case RELU:
-        computeParameterisedSymbolicBoundsForRelu( coeffs, receive );
-        break;
-
-    case SIGN:
-        computeParameterisedSymbolicBoundsForSign( coeffs, receive );
-        break;
-
-    case LEAKY_RELU:
-        computeParameterisedSymbolicBoundsForLeakyRelu( coeffs, receive );
-        break;
-
-    case BILINEAR:
-        computeParameterisedSymbolicBoundsForBilinear( coeffs, receive );
-        break;
-
-    default:
-        computeSymbolicBounds();
-        break;
-    }
-}
-
-void Layer::computeParameterisedSymbolicBoundsForRelu( const Vector<double> &coeffs, bool receive )
-{
-    ASSERT( coeffs.size() == 1 );
-
-    double coeff = coeffs[0];
-    ASSERT( coeff >= 0 && coeff <= 1 );
-
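
The parameterised relaxation replaces DeepPoly's binary lambda choice with a continuous coeff in [0, 1]: y >= coeff * x is sound on both sides of zero, since for x <= 0 we have relu(x) = 0 >= coeff * x, and for x >= 0 we have relu(x) = x >= coeff * x. A small editorial sanity check:

    #include <algorithm>
    #include <cassert>

    // For any coeff in [0, 1], relu(x) >= coeff * x holds everywhere, so an
    // optimiser may tune coeff continuously between the two DeepPoly
    // choices ( coeff = 0 and coeff = 1 ).
    int main()
    {
        for ( double coeff : { 0.0, 0.25, 0.5, 1.0 } )
            for ( double x = -5; x <= 5; x += 0.5 )
                assert( std::max( x, 0.0 ) >= coeff * x );
        return 0;
    }
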
-void Layer::computeParameterisedSymbolicBoundsForRelu( const Vector<double> &coeffs, bool receive )
-{
-    ASSERT( coeffs.size() == 1 );
-
-    double coeff = coeffs[0];
-    ASSERT( coeff >= 0 && coeff <= 1 );
-
-    std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 );
-    std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 );
-
-    for ( unsigned i = 0; i < _size; ++i )
-    {
-        if ( _eliminatedNeurons.exists( i ) )
-        {
-            _symbolicLowerBias[i] = _eliminatedNeurons[i];
-            _symbolicUpperBias[i] = _eliminatedNeurons[i];
-
-            _symbolicLbOfLb[i] = _eliminatedNeurons[i];
-            _symbolicUbOfLb[i] = _eliminatedNeurons[i];
-            _symbolicLbOfUb[i] = _eliminatedNeurons[i];
-            _symbolicUbOfUb[i] = _eliminatedNeurons[i];
-        }
-    }
-
-    for ( unsigned i = 0; i < _size; ++i )
-    {
-        if ( _eliminatedNeurons.exists( i ) )
-            continue;
-
-        /*
-          There are two ways we can determine that a ReLU has become fixed:
-
-          1. If the ReLU's variable has been externally fixed
-          2. lbLb >= 0 (ACTIVE) or ubUb <= 0 (INACTIVE)
-        */
-        PhaseStatus reluPhase = PHASE_NOT_FIXED;
-
-        // Has the f variable been eliminated or fixed?
-        if ( FloatUtils::isPositive( _lb[i] ) )
-            reluPhase = RELU_PHASE_ACTIVE;
-        else if ( FloatUtils::isZero( _ub[i] ) )
-            reluPhase = RELU_PHASE_INACTIVE;
-
-        ASSERT( _neuronToActivationSources.exists( i ) );
-        NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin();
-        const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
-
-        /*
-          A ReLU initially "inherits" the symbolic bounds computed
-          for its input variable
-        */
-        unsigned sourceLayerSize = sourceLayer->getSize();
-        const double *sourceSymbolicLb = sourceLayer->getSymbolicLb();
-        const double *sourceSymbolicUb = sourceLayer->getSymbolicUb();
-
-        for ( unsigned j = 0; j < _inputLayerSize; ++j )
-        {
-            _symbolicLb[j * _size + i] =
-                sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron];
-            _symbolicUb[j * _size + i] =
-                sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron];
-        }
-        _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron];
-        _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron];
-
-        double sourceLb = sourceLayer->getLb( sourceIndex._neuron );
-        double sourceUb = sourceLayer->getUb( sourceIndex._neuron );
-
-        _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron );
-        _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron );
-        _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron );
-        _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron );
-
-        // Has the b variable been fixed?
-        if ( !FloatUtils::isNegative( sourceLb ) )
-        {
-            reluPhase = RELU_PHASE_ACTIVE;
-        }
-        else if ( !FloatUtils::isPositive( sourceUb ) )
-        {
-            reluPhase = RELU_PHASE_INACTIVE;
-        }
-
-        if ( reluPhase == PHASE_NOT_FIXED )
-        {
-            // If we got here, we know that lbLb < 0 and ubUb > 0. There are
-            // four possible cases, depending on whether ubLb and lbUb are
-            // negative or positive (see the Neurify paper, page 14).
-
-            // Upper bound
-            if ( _symbolicLbOfUb[i] <= 0 )
-            {
-                // lbOfUb[i] < 0 < ubOfUb[i]
-                // Concretize the upper bound using the Ehlers-like approximation
-                for ( unsigned j = 0; j < _inputLayerSize; ++j )
-                    _symbolicUb[j * _size + i] = _symbolicUb[j * _size + i] * _symbolicUbOfUb[i] /
-                                                 ( _symbolicUbOfUb[i] - _symbolicLbOfUb[i] );
-
-                // Do the same for the bias, and then adjust
-                _symbolicUpperBias[i] = _symbolicUpperBias[i] * _symbolicUbOfUb[i] /
-                                        ( _symbolicUbOfUb[i] - _symbolicLbOfUb[i] );
-                _symbolicUpperBias[i] -= _symbolicLbOfUb[i] * _symbolicUbOfUb[i] /
-                                         ( _symbolicUbOfUb[i] - _symbolicLbOfUb[i] );
-            }
-
-            // Lower bound: y >= coeff * x (varies continuously between
-            // y >= 0 and y >= x).
-            if ( _symbolicUbOfLb[i] <= 0 )
-            {
-                for ( unsigned j = 0; j < _inputLayerSize; ++j )
-                    _symbolicLb[j * _size + i] *= coeff;
-
-                _symbolicLowerBias[i] *= coeff;
-            }
-            else
-            {
-                for ( unsigned j = 0; j < _inputLayerSize; ++j )
-                    _symbolicLb[j * _size + i] = _symbolicLb[j * _size + i] * _symbolicUbOfLb[i] /
-                                                 ( _symbolicUbOfLb[i] - _symbolicLbOfLb[i] );
-
-                _symbolicLowerBias[i] = _symbolicLowerBias[i] * _symbolicUbOfLb[i] /
-                                        ( _symbolicUbOfLb[i] - _symbolicLbOfLb[i] );
-            }
-
-            _symbolicLbOfLb[i] = 0;
-        }
-        else
-        {
-            // The phase of this ReLU is fixed!
-            if ( reluPhase == RELU_PHASE_ACTIVE )
-            {
-                // Active ReLU, bounds are propagated as is
-            }
-            else
-            {
-                // Inactive ReLU, returns zero
-                _symbolicLbOfLb[i] = 0;
-                _symbolicUbOfLb[i] = 0;
-                _symbolicLbOfUb[i] = 0;
-                _symbolicUbOfUb[i] = 0;
-
-                for ( unsigned j = 0; j < _inputLayerSize; ++j )
-                {
-                    _symbolicUb[j * _size + i] = 0;
-                    _symbolicLb[j * _size + i] = 0;
-                }
-
-                _symbolicLowerBias[i] = 0;
-                _symbolicUpperBias[i] = 0;
-            }
-        }
-
-        if ( _symbolicLbOfUb[i] < 0 )
-            _symbolicLbOfUb[i] = 0;
-
-        /*
-          We now have the tightest bounds we can for the relu
-          variable. If they are tighter than what was previously
-          known, store them.
-        */
-        if ( receive )
-        {
-            if ( _lb[i] < _symbolicLbOfLb[i] )
-            {
-                _lb[i] = _symbolicLbOfLb[i];
-                _layerOwner->receiveTighterBound(
-                    Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
-            }
-
-            if ( _ub[i] > _symbolicUbOfUb[i] )
-            {
-                _ub[i] = _symbolicUbOfUb[i];
-                _layerOwner->receiveTighterBound(
-                    Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
-            }
-        }
-    }
-}
-
-void Layer::computeParameterisedSymbolicBoundsForSign( const Vector<double> &coeffs, bool receive )
-{
-    ASSERT( coeffs.size() == 2 );
-    ASSERT( coeffs[0] >= 0 && coeffs[0] <= 1 );
-    ASSERT( coeffs[1] >= 0 && coeffs[1] <= 1 );
-
-    std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 );
-    std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 );
-
-    for ( unsigned i = 0; i < _size; ++i )
-    {
-        // Eliminated neurons are skipped
-        if ( _eliminatedNeurons.exists( i ) )
-        {
-            _symbolicLowerBias[i] = _eliminatedNeurons[i];
-            _symbolicUpperBias[i] = _eliminatedNeurons[i];
-
-            _symbolicLbOfLb[i] = _eliminatedNeurons[i];
-            _symbolicUbOfLb[i] = _eliminatedNeurons[i];
-            _symbolicLbOfUb[i] = _eliminatedNeurons[i];
-            _symbolicUbOfUb[i] = _eliminatedNeurons[i];
-
-            continue;
-        }
-
-        /*
-          There are two ways we can determine that a Sign has become fixed:
-
-          1. If the Sign's variable has been externally fixed
-          2. lbLb >= 0 (Positive) or ubUb < 0 (Negative)
-        */
-        PhaseStatus signPhase = PHASE_NOT_FIXED;
-
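/*
  For intuition, a sketch of the parameterised parallelogram relaxation the
  Sign case applies below to an unfixed neuron with input range [l, u],
  l < 0 < u, and parameters c0, c1 in [0, 1]. Names are illustrative only;
  LinearBound is the helper struct from the ReLU sketch above.
*/
static void parameterisedSignRelaxation( double l, double u, double c0, double c1,
                                         LinearBound &upper, LinearBound &lower )
{
    // Upper edge passes through ( 0, 1 ); at x = l it evaluates to
    // 1 - 2 * c0 >= -1, so it stays above sign(x) on [l, u].
    upper.slope = -2.0 / l * c0;
    upper.bias = 1;

    // Lower edge passes through ( 0, -1 ); at x = u it evaluates to
    // 2 * c1 - 1 <= 1, so it stays below sign(x) on [l, u].
    lower.slope = 2.0 / u * c1;
    lower.bias = -1;
}

-        // Has the f variable been eliminated or fixed?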
- if ( !FloatUtils::isNegative( _lb[i] ) ) - signPhase = SIGN_PHASE_POSITIVE; - else if ( FloatUtils::isNegative( _ub[i] ) ) - signPhase = SIGN_PHASE_NEGATIVE; - - ASSERT( _neuronToActivationSources.exists( i ) ); - NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); - const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); - - /* - A Sign initially "inherits" the symbolic bounds computed - for its input variable - */ - unsigned sourceLayerSize = sourceLayer->getSize(); - const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); - const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); - - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _symbolicLb[j * _size + i] = - sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron]; - _symbolicUb[j * _size + i] = - sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron]; - } - _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron]; - _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron]; - - double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); - double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); - - _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron ); - _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); - _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); - _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); - - // Has the b variable been fixed? - if ( !FloatUtils::isNegative( sourceLb ) ) - { - signPhase = SIGN_PHASE_POSITIVE; - } - else if ( FloatUtils::isNegative( sourceUb ) ) - { - signPhase = SIGN_PHASE_NEGATIVE; - } - - if ( signPhase == PHASE_NOT_FIXED ) - { - PhaseStatus upperSignPhase = PHASE_NOT_FIXED; - PhaseStatus lowerSignPhase = PHASE_NOT_FIXED; - - // If we got here, we know that lbLb < 0 and ubUb - // > 0 - - // Upper bound - if ( !FloatUtils::isNegative( _symbolicLbOfUb[i] ) ) - { - // The upper bound is strictly positive - turns into - // the constant 1 - - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - _symbolicUb[j * _size + i] = 0; - - _symbolicUpperBias[i] = 1; - - upperSignPhase = SIGN_PHASE_POSITIVE; - } - else - { - // The upper bound's phase is not fixed, use parameterised - // parallelogram approximation: y <= - 2 / l * coeffs[0] * x + 1 - // (varies continuously between y <= 1 and y <= -2 / l * x + 1). - double factor = -2.0 / _symbolicLbOfLb[i] * coeffs[0]; - - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - _symbolicUb[j * _size + i] *= factor; - - // Do the same for the bias, and then adjust - _symbolicUpperBias[i] *= factor; - _symbolicUpperBias[i] += 1; - } - - // Lower bound - if ( FloatUtils::isNegative( _symbolicUbOfLb[i] ) ) - { - // The lower bound is strictly negative - turns into - // the constant -1 - - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - _symbolicLb[j * _size + i] = 0; - - _symbolicLowerBias[i] = -1; - - lowerSignPhase = SIGN_PHASE_NEGATIVE; - } - else - { - // The lower bound's phase is not fixed, use parameterised - // parallelogram approximation: y >= 2 / u * coeffs[1] * x - 1 - // (varies continuously between y >= -1 and y >= 2 / u * x - 1). 
-                double factor = 2.0 / _symbolicUbOfUb[i] * coeffs[1];
-
-                for ( unsigned j = 0; j < _inputLayerSize; ++j )
-                {
-                    _symbolicLb[j * _size + i] *= factor;
-                }
-
-                // Do the same for the bias, and then adjust
-                _symbolicLowerBias[i] *= factor;
-                _symbolicLowerBias[i] -= 1;
-            }
-
-            if ( upperSignPhase == PHASE_NOT_FIXED )
-            {
-                _symbolicUbOfUb[i] = 1;
-                _symbolicLbOfUb[i] = -1;
-            }
-            else
-            {
-                _symbolicUbOfUb[i] = 1;
-                _symbolicLbOfUb[i] = 1;
-            }
-
-            if ( lowerSignPhase == PHASE_NOT_FIXED )
-            {
-                _symbolicUbOfLb[i] = 1;
-                _symbolicLbOfLb[i] = -1;
-            }
-            else
-            {
-                _symbolicUbOfLb[i] = -1;
-                _symbolicLbOfLb[i] = -1;
-            }
-        }
-        else
-        {
-            // The phase of this Sign is fixed!
-            double constant = ( signPhase == SIGN_PHASE_POSITIVE ) ? 1 : -1;
-
-            _symbolicLbOfLb[i] = constant;
-            _symbolicUbOfLb[i] = constant;
-            _symbolicLbOfUb[i] = constant;
-            _symbolicUbOfUb[i] = constant;
-
-            for ( unsigned j = 0; j < _inputLayerSize; ++j )
-            {
-                _symbolicUb[j * _size + i] = 0;
-                _symbolicLb[j * _size + i] = 0;
-            }
-
-            _symbolicLowerBias[i] = constant;
-            _symbolicUpperBias[i] = constant;
-        }
-
-        if ( _symbolicLbOfLb[i] < -1 )
-            _symbolicLbOfLb[i] = -1;
-        if ( _symbolicUbOfUb[i] > 1 )
-            _symbolicUbOfUb[i] = 1;
-
-        /*
-          We now have the tightest bounds we can for the sign
-          variable. If they are tighter than what was previously
-          known, store them.
-        */
-        if ( receive )
-        {
-            if ( _lb[i] < _symbolicLbOfLb[i] )
-            {
-                _lb[i] = _symbolicLbOfLb[i];
-                _layerOwner->receiveTighterBound(
-                    Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
-            }
-
-            if ( _ub[i] > _symbolicUbOfUb[i] )
-            {
-                _ub[i] = _symbolicUbOfUb[i];
-                _layerOwner->receiveTighterBound(
-                    Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
-            }
-        }
-    }
-}
-
-void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( const Vector<double> &coeffs,
-                                                            bool receive )
-{
-    ASSERT( _alpha > 0 && _alpha < 1 );
-    ASSERT( coeffs.size() == 1 );
-    double coeff = coeffs[0];
-    ASSERT( coeff >= 0 && coeff <= 1 );
-
-    std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 );
-    std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 );
-
-    for ( unsigned i = 0; i < _size; ++i )
-    {
-        if ( _eliminatedNeurons.exists( i ) )
-        {
-            _symbolicLowerBias[i] = _eliminatedNeurons[i];
-            _symbolicUpperBias[i] = _eliminatedNeurons[i];
-
-            _symbolicLbOfLb[i] = _eliminatedNeurons[i];
-            _symbolicUbOfLb[i] = _eliminatedNeurons[i];
-            _symbolicLbOfUb[i] = _eliminatedNeurons[i];
-            _symbolicUbOfUb[i] = _eliminatedNeurons[i];
-        }
-    }
-
-    for ( unsigned i = 0; i < _size; ++i )
-    {
-        if ( _eliminatedNeurons.exists( i ) )
-            continue;
-
-        /*
-          There are two ways we can determine that a LeakyReLU has become fixed:
-
-          1. If the LeakyReLU's variable has been externally fixed
-          2. lbLb >= 0 (ACTIVE) or ubUb <= 0 (INACTIVE)
-        */
-        PhaseStatus leakyReluPhase = PHASE_NOT_FIXED;
-
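/*
  A sketch of the LeakyReLU relaxation used below (the DeepPoly-style chord
  and lambda line), for 0 < alpha < 1 and an unfixed neuron with input in
  [l, u], l < 0 < u. Illustrative names only; LinearBound is the helper
  struct from the ReLU sketch above.
*/
static void parameterisedLeakyReluRelaxation( double l, double u, double alpha, double coeff,
                                              LinearBound &upper, LinearBound &lower )
{
    // Upper bound: the chord through ( l, alpha * l ) and ( u, u ).
    upper.slope = ( u - alpha * l ) / ( u - l );
    upper.bias = ( alpha - 1 ) * u * l / ( u - l );

    // Lower bound: y = lambda * x with lambda = ( 1 - alpha ) * coeff + alpha,
    // which varies continuously between lambda = alpha and lambda = 1. Any such
    // lambda line lies below the function on [l, u].
    double lambda = ( 1 - alpha ) * coeff + alpha;
    lower.slope = lambda;
    lower.bias = 0;
}

-        // Has the f variable been eliminated or fixed?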
- if ( FloatUtils::isPositive( _lb[i] ) ) - leakyReluPhase = RELU_PHASE_ACTIVE; - else if ( FloatUtils::isZero( _ub[i] ) ) - leakyReluPhase = RELU_PHASE_INACTIVE; - - ASSERT( _neuronToActivationSources.exists( i ) ); - NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); - const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); - - /* - A LeakyReLU initially "inherits" the symbolic bounds computed - for its input variable - */ - unsigned sourceLayerSize = sourceLayer->getSize(); - const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); - const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); - - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _symbolicLb[j * _size + i] = - sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron]; - _symbolicUb[j * _size + i] = - sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron]; - } - _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron]; - _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron]; - - double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); - double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); - - _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron ); - _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); - _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); - _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); - - // Has the b variable been fixed? - if ( !FloatUtils::isNegative( sourceLb ) ) - { - leakyReluPhase = RELU_PHASE_ACTIVE; - } - else if ( !FloatUtils::isPositive( sourceUb ) ) - { - leakyReluPhase = RELU_PHASE_INACTIVE; - } - - if ( leakyReluPhase == PHASE_NOT_FIXED ) - { - // LeakyReLU not fixed - // Symbolic upper bound: x_f <= (x_b - l) * u / ( u - l) - // Concrete upper bound: x_f <= ub_b - double width = sourceUb - sourceLb; - double weight = ( sourceUb - _alpha * sourceLb ) / width; - - if ( _alpha <= 1 ) - { - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _symbolicUb[j * _size + i] *= weight; - } - - // Do the same for the bias, and then adjust - _symbolicUpperBias[i] *= weight; - _symbolicUpperBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; - - - // For the lower bound, in general, x_f >= lambda * x_b, where - // 0 <= lambda <= 1, would be a sound lower bound. We - // use the heuristic described in section 4.1 of - // https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf - // to set the value of lambda (either 0 or 1 is considered). - - // lambda = ((1 - alpha) * coeff + alpha) (varies continuously between lambda = - // alpha and lambda = 1). Symbolic lower bound: x_f >= ((1 - alpha) * coeff + - // alpha) x_b Concrete lower bound: x_f >= 0 - - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _symbolicLb[j * _size + i] *= ( 1 - _alpha ) * coeff + _alpha; - } - - _symbolicLowerBias[i] *= ( 1 - _alpha ) * coeff + _alpha; - } - - else - { - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _symbolicLb[j * _size + i] *= weight; - } - - // Do the same for the bias, and then adjust - _symbolicLowerBias[i] *= weight; - _symbolicLowerBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; - - // lambda = ((1 - alpha) * coeff + alpha) (varies continuously between lambda = - // alpha and lambda = 1). 
Symbolic upper bound: x_f <= ((1 - alpha) * coeff +
-                // alpha) x_b (in this branch the chord bounds x_f from below,
-                // so the lambda line bounds x_f from above).
-
-                for ( unsigned j = 0; j < _inputLayerSize; ++j )
-                {
-                    _symbolicUb[j * _size + i] *= ( 1 - _alpha ) * coeff + _alpha;
-                }
-
-                _symbolicUpperBias[i] *= ( 1 - _alpha ) * coeff + _alpha;
-            }
-
-            /*
-              We now have the symbolic representation for the current
-              layer. Next, we compute new lower and upper bounds for
-              it. For each of these bounds, we compute an upper bound and
-              a lower bound.
-            */
-            _symbolicLbOfLb[i] = _symbolicLowerBias[i];
-            _symbolicUbOfLb[i] = _symbolicLowerBias[i];
-            _symbolicLbOfUb[i] = _symbolicUpperBias[i];
-            _symbolicUbOfUb[i] = _symbolicUpperBias[i];
-
-            for ( unsigned j = 0; j < _inputLayerSize; ++j )
-            {
-                double inputLb = _layerOwner->getLayer( 0 )->getLb( j );
-                double inputUb = _layerOwner->getLayer( 0 )->getUb( j );
-
-                double entry = _symbolicLb[j * _size + i];
-
-                if ( entry >= 0 )
-                {
-                    _symbolicLbOfLb[i] += ( entry * inputLb );
-                    _symbolicUbOfLb[i] += ( entry * inputUb );
-                }
-                else
-                {
-                    _symbolicLbOfLb[i] += ( entry * inputUb );
-                    _symbolicUbOfLb[i] += ( entry * inputLb );
-                }
-
-                entry = _symbolicUb[j * _size + i];
-
-                if ( entry >= 0 )
-                {
-                    _symbolicLbOfUb[i] += ( entry * inputLb );
-                    _symbolicUbOfUb[i] += ( entry * inputUb );
-                }
-                else
-                {
-                    _symbolicLbOfUb[i] += ( entry * inputUb );
-                    _symbolicUbOfUb[i] += ( entry * inputLb );
-                }
-            }
-        }
-        else
-        {
-            // The phase of this LeakyReLU is fixed!
-            if ( leakyReluPhase == RELU_PHASE_ACTIVE )
-            {
-                // Positive LeakyReLU, bounds are propagated as is
-            }
-            else
-            {
-                // Negative LeakyReLU, bounds are multiplied by _alpha
-                _symbolicLbOfLb[i] *= _alpha;
-                _symbolicUbOfLb[i] *= _alpha;
-                _symbolicLbOfUb[i] *= _alpha;
-                _symbolicUbOfUb[i] *= _alpha;
-
-                for ( unsigned j = 0; j < _inputLayerSize; ++j )
-                {
-                    _symbolicUb[j * _size + i] *= _alpha;
-                    _symbolicLb[j * _size + i] *= _alpha;
-                }
-
-                _symbolicLowerBias[i] *= _alpha;
-                _symbolicUpperBias[i] *= _alpha;
-            }
-        }
-
-        if ( _symbolicUbOfUb[i] > sourceUb )
-            _symbolicUbOfUb[i] = sourceUb;
-        if ( _symbolicLbOfLb[i] < _alpha * sourceLb )
-            _symbolicLbOfLb[i] = _alpha * sourceLb;
-
-        /*
-          We now have the tightest bounds we can for the leakyRelu
-          variable. If they are tighter than what was previously
-          known, store them.
-        */
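/*
  The lbOfLb/ubOfUb values above instantiate a pattern that recurs throughout
  this file: a symbolic bound bias + sum_j row[j] * x_j is concretized over
  the input box by picking, per input, the corner that extremizes the term.
  A standalone sketch with illustrative names, not Marabou's API:
*/
static double concretizeLowerBound( const double *row, double bias,
                                    const double *inputLbs, const double *inputUbs,
                                    unsigned inputLayerSize )
{
    double result = bias;
    for ( unsigned j = 0; j < inputLayerSize; ++j )
        result += row[j] >= 0 ? row[j] * inputLbs[j] : row[j] * inputUbs[j];
    return result;
}
// The concrete upper bound is computed symmetrically, with the lb/ub choices swapped.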
-        if ( receive )
-        {
-            if ( _lb[i] < _symbolicLbOfLb[i] )
-            {
-                _lb[i] = _symbolicLbOfLb[i];
-                _layerOwner->receiveTighterBound(
-                    Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) );
-            }
-
-            if ( _ub[i] > _symbolicUbOfUb[i] )
-            {
-                _ub[i] = _symbolicUbOfUb[i];
-                _layerOwner->receiveTighterBound(
-                    Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) );
-            }
-        }
-    }
-}
-
-void Layer::computeParameterisedSymbolicBoundsForBilinear( const Vector<double> &coeffs,
-                                                           bool receive )
-{
-    ASSERT( coeffs.size() == 2 );
-    ASSERT( coeffs[0] >= 0 && coeffs[0] <= 1 );
-    ASSERT( coeffs[1] >= 0 && coeffs[1] <= 1 );
-
-    std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 );
-    std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 );
-
-    for ( unsigned i = 0; i < _size; ++i )
-    {
-        if ( _eliminatedNeurons.exists( i ) )
-        {
-            _symbolicLowerBias[i] = _eliminatedNeurons[i];
-            _symbolicUpperBias[i] = _eliminatedNeurons[i];
-
-            _symbolicLbOfLb[i] = _eliminatedNeurons[i];
-            _symbolicUbOfLb[i] = _eliminatedNeurons[i];
-            _symbolicLbOfUb[i] = _eliminatedNeurons[i];
-            _symbolicUbOfUb[i] = _eliminatedNeurons[i];
-        }
-    }
-
-    for ( unsigned i = 0; i < _size; ++i )
-    {
-        if ( _eliminatedNeurons.exists( i ) )
-            continue;
-
-        ASSERT( _neuronToActivationSources.exists( i ) );
-        List<NeuronIndex> sources = getActivationSources( i );
-        ASSERT( sources.size() == 2 );
-
-        const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer );
-
-        unsigned sourceLayerSize = sourceLayer->getSize();
-        const double *sourceSymbolicLb = sourceLayer->getSymbolicLb();
-        const double *sourceSymbolicUb = sourceLayer->getSymbolicUb();
-
-        Vector<double> sourceLbs;
-        Vector<double> sourceUbs;
-        Vector<double> sourceValues;
-        bool allConstant = true;
-        unsigned indexA = 0;
-        unsigned indexB = 0;
-        unsigned counter = 0;
-        for ( const auto &sourceIndex : sources )
-        {
-            unsigned sourceNeuron = sourceIndex._neuron;
-            double sourceLb = sourceLayer->getLb( sourceNeuron );
-            double sourceUb = sourceLayer->getUb( sourceNeuron );
-
-            sourceLbs.append( sourceLb );
-            sourceUbs.append( sourceUb );
-
-            if ( !sourceLayer->neuronEliminated( sourceNeuron ) )
-            {
-                allConstant = false;
-            }
-            else
-            {
-                double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron );
-                sourceValues.append( sourceValue );
-            }
-
-            if ( counter == 0 )
-            {
-                indexA = sourceIndex._neuron;
-            }
-            else
-            {
-                indexB = sourceIndex._neuron;
-            }
-            ++counter;
-        }
-
-        if ( allConstant )
-        {
-            // If both source neurons have been eliminated, this neuron is constant
-            for ( unsigned j = 0; j < _inputLayerSize; ++j )
-            {
-                _symbolicUb[j * _size + i] = 0;
-                _symbolicLb[j * _size + i] = 0;
-            }
-
-            _symbolicUpperBias[i] = sourceValues[0] * sourceValues[1];
-            _symbolicLowerBias[i] = sourceValues[0] * sourceValues[1];
-            continue;
-        }
-
-        for ( unsigned j = 0; j < _inputLayerSize; ++j )
-        {
-            _symbolicUb[j * _size + i] = 0;
-            _symbolicLb[j * _size + i] = 0;
-        }
-
-        // Bilinear linear relaxation (arXiv:2405.21063v2 [cs.LG])
-        // Lower bound: out >= aLower * x + bLower * y + cLower, where
-        //   aLower = alpha1 * l_y + ( 1 - alpha1 ) * u_y
-        //   bLower = alpha1 * l_x + ( 1 - alpha1 ) * u_x
-        //   cLower = -alpha1 * l_x * l_y - ( 1 - alpha1 ) * u_x * u_y
-
-        // Upper bound: out <= aUpper * x + bUpper * y + cUpper, where
-        //   aUpper = alpha2 * u_y + ( 1 - alpha2 ) * l_y
-        //   bUpper = alpha2 * l_x + ( 1 - alpha2 ) * u_x
-        //   cUpper = -alpha2 * l_x * u_y - ( 1 - alpha2 ) * u_x * l_y
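/*
  The coefficients computed next are convex combinations of the two McCormick
  planes for z = x * y over the box [l_x, u_x] x [l_y, u_y]: alpha1 = 1
  recovers the ( l_x, l_y ) under-estimator, alpha1 = 0 the ( u_x, u_y ) one,
  and similarly for the upper planes. A standalone sketch with illustrative
  names, not Marabou's API:
*/
static void parameterisedBilinearRelaxation( double lx, double ux, double ly, double uy,
                                             double alpha1, double alpha2,
                                             double &aLower, double &bLower, double &cLower,
                                             double &aUpper, double &bUpper, double &cUpper )
{
    // Lower plane: z >= x * ly + y * lx - lx * ly blended with
    //              z >= x * uy + y * ux - ux * uy
    aLower = alpha1 * ly + ( 1 - alpha1 ) * uy;
    bLower = alpha1 * lx + ( 1 - alpha1 ) * ux;
    cLower = -alpha1 * lx * ly - ( 1 - alpha1 ) * ux * uy;

    // Upper plane: z <= x * uy + y * lx - lx * uy blended with
    //              z <= x * ly + y * ux - ux * ly
    aUpper = alpha2 * uy + ( 1 - alpha2 ) * ly;
    bUpper = alpha2 * lx + ( 1 - alpha2 ) * ux;
    cUpper = -alpha2 * lx * uy - ( 1 - alpha2 ) * ux * ly;
}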
-        double aLower = coeffs[0] * sourceLbs[1] + ( 1 - coeffs[0] ) * sourceUbs[1];
-        double aUpper = coeffs[1] * sourceUbs[1] + ( 1 - coeffs[1] ) * sourceLbs[1];
-        double bLower = coeffs[0] * sourceLbs[0] + ( 1 - coeffs[0] ) * sourceUbs[0];
-        double bUpper = coeffs[1] * sourceLbs[0] + ( 1 - coeffs[1] ) * sourceUbs[0];
-
-        for ( unsigned j = 0; j < _inputLayerSize; ++j )
-        {
-            if ( aLower >= 0 )
-            {
-                _symbolicLb[j * _size + i] +=
-                    aLower * sourceSymbolicLb[j * sourceLayerSize + indexA];
-            }
-            else
-            {
-                _symbolicLb[j * _size + i] +=
-                    aLower * sourceSymbolicUb[j * sourceLayerSize + indexA];
-            }
-
-            if ( aUpper >= 0 )
-            {
-                _symbolicUb[j * _size + i] +=
-                    aUpper * sourceSymbolicUb[j * sourceLayerSize + indexA];
-            }
-            else
-            {
-                _symbolicUb[j * _size + i] +=
-                    aUpper * sourceSymbolicLb[j * sourceLayerSize + indexA];
-            }
-
-            if ( bLower >= 0 )
-            {
-                _symbolicLb[j * _size + i] +=
-                    bLower * sourceSymbolicLb[j * sourceLayerSize + indexB];
-            }
-            else
-            {
-                _symbolicLb[j * _size + i] +=
-                    bLower * sourceSymbolicUb[j * sourceLayerSize + indexB];
-            }
-
-            if ( bUpper >= 0 )
-            {
-                _symbolicUb[j * _size + i] +=
-                    bUpper * sourceSymbolicUb[j * sourceLayerSize + indexB];
-            }
-            else
-            {
-                _symbolicUb[j * _size + i] +=
-                    bUpper * sourceSymbolicLb[j * sourceLayerSize + indexB];
-            }
-        }
-
-        _symbolicLowerBias[i] = -coeffs[0] * sourceLbs[0] * sourceLbs[1] -
-                                ( 1 - coeffs[0] ) * sourceUbs[0] * sourceUbs[1];
-        _symbolicUpperBias[i] = -coeffs[1] * sourceLbs[0] * sourceUbs[1] -
-                                ( 1 - coeffs[1] ) * sourceUbs[0] * sourceLbs[1];
-
-        double lb = FloatUtils::infinity();
-        double ub = FloatUtils::negativeInfinity();
-        List<double> values = { sourceLbs[0] * sourceLbs[1],
-                                sourceLbs[0] * sourceUbs[1],
-                                sourceUbs[0] * sourceLbs[1],
-                                sourceUbs[0] * sourceUbs[1] };
-        for ( const auto &v : values )
-        {
-            if ( v < lb )
-                lb = v;
-            if ( v > ub )
-                ub = v;
-        }
-
-        /*
-          We now have the symbolic representation for the current
-          layer. Next, we compute new lower and upper bounds for
-          it. For each of these bounds, we compute an upper bound and
-          a lower bound.
-        */
-        _symbolicLbOfLb[i] = _symbolicLowerBias[i];
-        _symbolicUbOfLb[i] = _symbolicLowerBias[i];
-        _symbolicLbOfUb[i] = _symbolicUpperBias[i];
-        _symbolicUbOfUb[i] = _symbolicUpperBias[i];
-
-        for ( unsigned j = 0; j < _inputLayerSize; ++j )
-        {
-            double inputLb = _layerOwner->getLayer( 0 )->getLb( j );
-            double inputUb = _layerOwner->getLayer( 0 )->getUb( j );
-
-            double entry = _symbolicLb[j * _size + i];
-
-            if ( entry >= 0 )
-            {
-                _symbolicLbOfLb[i] += ( entry * inputLb );
-                _symbolicUbOfLb[i] += ( entry * inputUb );
-            }
-            else
-            {
-                _symbolicLbOfLb[i] += ( entry * inputUb );
-                _symbolicUbOfLb[i] += ( entry * inputLb );
-            }
-
-            entry = _symbolicUb[j * _size + i];
-
-            if ( entry >= 0 )
-            {
-                _symbolicLbOfUb[i] += ( entry * inputLb );
-                _symbolicUbOfUb[i] += ( entry * inputUb );
-            }
-            else
-            {
-                _symbolicLbOfUb[i] += ( entry * inputUb );
-                _symbolicUbOfUb[i] += ( entry * inputLb );
-            }
-        }
-
-        /*
-          We now have the tightest bounds we can for the bilinear
-          variable. If they are tighter than what was previously
-          known, store them.
- */ - if ( receive ) - { - if ( _lb[i] < _symbolicLbOfLb[i] ) - { - _lb[i] = _symbolicLbOfLb[i]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } - - if ( _ub[i] > _symbolicUbOfUb[i] ) - { - _ub[i] = _symbolicUbOfUb[i]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); - } - } - } -} - -double Layer::LSELowerBound( const Vector &inputs, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) -{ - double sum = 0; - for ( unsigned j = 0; j < inputs.size(); ++j ) - { - double lj = inputLbs[j]; - double uj = inputUbs[j]; - double xj = inputs[j]; - sum += - ( uj - xj ) / ( uj - lj ) * std::exp( lj ) + ( xj - lj ) / ( uj - lj ) * std::exp( uj ); - } - - return std::exp( inputs[i] ) / sum; -} - -double Layer::dLSELowerBound( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) -{ - double val = 0; - if ( i == di ) - val += LSELowerBound( inputMids, inputLbs, inputUbs, i ); - - double ldi = inputLbs[di]; - double udi = inputUbs[di]; - - double sum = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) - { - double lj = inputLbs[j]; - double uj = inputUbs[j]; - double xj = inputMids[j]; - - sum += - ( uj - xj ) / ( uj - lj ) * std::exp( lj ) + ( xj - lj ) / ( uj - lj ) * std::exp( uj ); - } - - val -= std::exp( inputMids[i] ) / ( sum * sum ) * ( std::exp( udi ) - std::exp( ldi ) ) / - ( udi - ldi ); - - return val; -} - -double Layer::LSELowerBound2( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) -{ - double max = FloatUtils::negativeInfinity(); - unsigned maxInputIndex = 0; - unsigned index = 0; - for ( const auto &mid : inputMids ) - { - if ( mid > max ) - { - max = mid; - maxInputIndex = index; - } - ++index; - } - - if ( maxInputIndex == i ) - return ERLowerBound( inputMids, inputLbs, inputUbs, i ); - else - { - double sum = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) - { - if ( j == maxInputIndex ) - sum += 1; - else - { - double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; - double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; - double xjjstar = inputMids[j] - inputMids[maxInputIndex]; - - sum += ( ujjstar - xjjstar ) / ( ujjstar - ljjstar ) * std::exp( ljjstar ) + - ( xjjstar - ljjstar ) / ( ujjstar - ljjstar ) * std::exp( ujjstar ); - } - } - - return std::exp( inputMids[i] - inputMids[maxInputIndex] ) / sum; - } -} - -double Layer::dLSELowerBound2( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) -{ - double max = FloatUtils::negativeInfinity(); - unsigned maxInputIndex = 0; - unsigned index = 0; - for ( const auto &mid : inputMids ) - { - if ( mid > max ) - { - max = mid; - maxInputIndex = index; - } - ++index; - } - - if ( maxInputIndex == i ) - return dERLowerBound( inputMids, inputLbs, inputUbs, i, di ); - else - { - double val = LSELowerBound2( inputMids, inputLbs, inputUbs, i ); - - double sum = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) - { - if ( j == maxInputIndex ) - sum += 1; - else - { - double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; - double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; - double xjjstar = inputMids[j] - inputMids[maxInputIndex]; - sum += ( ujjstar - xjjstar ) / ( ujjstar - ljjstar ) * std::exp( ljjstar ) + - ( xjjstar - ljjstar ) / ( ujjstar - ljjstar ) * std::exp( ujjstar ); - } - } - double val2 = std::exp( inputMids[i] - inputMids[maxInputIndex] 
) / ( sum * sum ); - - if ( i == di ) - { - double ldijstar = inputLbs[i] - inputUbs[maxInputIndex]; - double udijstar = inputUbs[i] - inputLbs[maxInputIndex]; - return val - - val2 * ( std::exp( udijstar ) - std::exp( ldijstar ) ) / ( udijstar - ldijstar ); - } - else if ( maxInputIndex == di ) - { - double sum2 = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) - { - if ( j == maxInputIndex ) - continue; - else - { - double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; - double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; - sum2 += ( std::exp( ujjstar ) - std::exp( ljjstar ) ) / ( ujjstar - ljjstar ); - } - } - return -val + val2 * sum2; - } - else - { - double ldijstar = inputLbs[di] - inputUbs[maxInputIndex]; - double udijstar = inputUbs[di] - inputLbs[maxInputIndex]; - return -val2 * ( std::exp( udijstar ) - std::exp( ldijstar ) ) / - ( udijstar - ldijstar ); - } - } -} - -double Layer::LSEUpperBound( const Vector &inputs, - const Vector &outputLb, - const Vector &outputUb, - unsigned i ) -{ - double li = outputLb[i]; - double ui = outputUb[i]; - - Vector inputTilda; - SoftmaxConstraint::xTilda( inputs, inputs[i], inputTilda ); - - return ( ( li * std::log( ui ) - ui * std::log( li ) ) / ( std::log( ui ) - std::log( li ) ) - - ( ui - li ) / ( std::log( ui ) - std::log( li ) ) * - SoftmaxConstraint::logSumOfExponential( inputTilda ) ); -} - -double Layer::dLSEUpperbound( const Vector &inputMids, - const Vector &outputLb, - const Vector &outputUb, - unsigned i, - unsigned di ) -{ - double li = outputLb[i]; - double ui = outputUb[i]; - - double val = -( ui - li ) / ( std::log( ui ) - std::log( li ) ); - - double val2 = std::exp( inputMids[di] ) / SoftmaxConstraint::sumOfExponential( inputMids ); - if ( i == di ) - val2 -= 1; - - return val * val2; -} - -double Layer::ERLowerBound( const Vector &inputs, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) -{ - Vector inputTilda; - SoftmaxConstraint::xTilda( inputs, inputs[i], inputTilda ); - - double sum = 0; - for ( unsigned j = 0; j < inputs.size(); ++j ) - { - if ( i == j ) - sum += 1; - else - { - double ljTilda = inputLbs[j] - inputUbs[i]; - double ujTilda = inputUbs[j] - inputLbs[i]; - double xjTilda = inputTilda[j]; - - sum += ( ujTilda - xjTilda ) / ( ujTilda - ljTilda ) * std::exp( ljTilda ) + - ( xjTilda - ljTilda ) / ( ujTilda - ljTilda ) * std::exp( ujTilda ); - } - } - - return 1 / sum; -} - -double Layer::dERLowerBound( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) -{ - double val = ERLowerBound( inputMids, inputLbs, inputUbs, i ); - - if ( i != di ) - { - double ldiTilda = inputLbs[di] - inputUbs[i]; - double udiTilda = inputUbs[di] - inputLbs[i]; - return -val * val * ( std::exp( udiTilda ) - std::exp( ldiTilda ) ) / - ( udiTilda - ldiTilda ); - } - else - { - double val2 = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) - { - if ( j != i ) - { - double ljTilda = inputLbs[j] - inputUbs[i]; - double ujTilda = inputUbs[j] - inputLbs[i]; - val2 += ( std::exp( ujTilda ) - std::exp( ljTilda ) ) / ( ujTilda - ljTilda ); - } - } - return val * val * val2; - } -} - -double Layer::ERUpperBound( const Vector &inputs, - const Vector &outputLb, - const Vector &outputUb, - unsigned i ) -{ - double li = outputLb[i]; - double ui = outputUb[i]; - - Vector inputTilda; - SoftmaxConstraint::xTilda( inputs, inputs[i], inputTilda ); - - return ui + li - ui * li * SoftmaxConstraint::sumOfExponential( inputTilda ); -} - -double Layer::dERUpperBound( 
const Vector &inputMids, - const Vector &outputLb, - const Vector &outputUb, - unsigned i, - unsigned di ) -{ - double li = outputLb[i]; - double ui = outputUb[i]; - - - if ( i == di ) - { - double val2 = -1; - for ( unsigned j = 0; j < inputMids.size(); ++j ) - val2 += std::exp( inputMids[j] - inputMids[i] ); - return li * ui * val2; - } - else - return -li * ui * std::exp( inputMids[di] - inputMids[i] ); -} - -double Layer::linearLowerBound( const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) -{ - Vector uTilda; - SoftmaxConstraint::xTilda( inputUbs, inputLbs[i], uTilda ); - uTilda[i] = 0; - return 1 / SoftmaxConstraint::sumOfExponential( uTilda ); -} - -double Layer::linearUpperBound( const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) -{ - Vector lTilda; - SoftmaxConstraint::xTilda( inputLbs, inputUbs[i], lTilda ); - lTilda[i] = 0; - return 1 / SoftmaxConstraint::sumOfExponential( lTilda ); -} - -void Layer::eliminateVariable( unsigned variable, double value ) -{ - if ( !_variableToNeuron.exists( variable ) ) - return; - - ASSERT( _type != INPUT ); - - unsigned neuron = _variableToNeuron[variable]; - _eliminatedNeurons[neuron] = value; - _lb[neuron] = value; - _ub[neuron] = value; - _neuronToVariable.erase( _variableToNeuron[variable] ); - _variableToNeuron.erase( variable ); -} - -void Layer::updateVariableIndices( const Map &oldIndexToNewIndex, - const Map &mergedVariables ) -{ - // First, do a pass to handle any merged variables - auto neuronIt = _neuronToVariable.begin(); - while ( neuronIt != _neuronToVariable.end() ) - { - unsigned variable = neuronIt->second; - while ( mergedVariables.exists( variable ) ) - { - neuronIt->second = mergedVariables[variable]; - variable = neuronIt->second; - } - - ++neuronIt; - } - - // Now handle re-indexing - neuronIt = _neuronToVariable.begin(); - while ( neuronIt != _neuronToVariable.end() ) - { - unsigned variable = neuronIt->second; - - if ( !oldIndexToNewIndex.exists( variable ) ) - { - // This variable has been eliminated, remove from map - neuronIt = _neuronToVariable.erase( neuronIt ); - } - else - { - if ( oldIndexToNewIndex[variable] == variable ) - { - // Index hasn't changed, skip - } - else - { - // Index has changed - neuronIt->second = oldIndexToNewIndex[variable]; - } - - ++neuronIt; - continue; - } - } -} - -unsigned Layer::getLayerIndex() const -{ - return _layerIndex; -} - -Layer::Layer( const Layer *other ) - : _bias( NULL ) - , _assignment( NULL ) - , _lb( NULL ) - , _ub( NULL ) - , _inputLayerSize( 0 ) - , _symbolicLb( NULL ) - , _symbolicUb( NULL ) - , _symbolicLowerBias( NULL ) - , _symbolicUpperBias( NULL ) - , _symbolicLbOfLb( NULL ) - , _symbolicUbOfLb( NULL ) - , _symbolicLbOfUb( NULL ) - , _symbolicUbOfUb( NULL ) -{ - _layerIndex = other->_layerIndex; - _type = other->_type; - _size = other->_size; - _layerOwner = other->_layerOwner; - _alpha = other->_alpha; - - allocateMemory(); - - for ( auto &sourceLayerEntry : other->_sourceLayers ) - { - addSourceLayer( sourceLayerEntry.first, sourceLayerEntry.second ); - - if ( other->_layerToWeights.exists( sourceLayerEntry.first ) ) - memcpy( _layerToWeights[sourceLayerEntry.first], - other->_layerToWeights[sourceLayerEntry.first], - sizeof( double ) * sourceLayerEntry.second * _size ); - - if ( other->_layerToPositiveWeights.exists( sourceLayerEntry.first ) ) - memcpy( _layerToPositiveWeights[sourceLayerEntry.first], - other->_layerToPositiveWeights[sourceLayerEntry.first], - sizeof( double ) * sourceLayerEntry.second * _size ); - - if ( 
other->_layerToNegativeWeights.exists( sourceLayerEntry.first ) ) - memcpy( _layerToNegativeWeights[sourceLayerEntry.first], - other->_layerToNegativeWeights[sourceLayerEntry.first], - sizeof( double ) * sourceLayerEntry.second * _size ); - } - - _successorLayers = other->_successorLayers; - - if ( other->_bias ) - memcpy( _bias, other->_bias, sizeof( double ) * _size ); - - _neuronToActivationSources = other->_neuronToActivationSources; - - _neuronToVariable = other->_neuronToVariable; - _variableToNeuron = other->_variableToNeuron; - _eliminatedNeurons = other->_eliminatedNeurons; - - _inputLayerSize = other->_inputLayerSize; -} - -const double *Layer::getSymbolicLb() const -{ - return _symbolicLb; -} - -const double *Layer::getSymbolicUb() const -{ - return _symbolicUb; -} - -const double *Layer::getSymbolicLowerBias() const -{ - return _symbolicLowerBias; -} - -const double *Layer::getSymbolicUpperBias() const -{ - return _symbolicUpperBias; -} - -double Layer::getSymbolicLbOfLb( unsigned neuron ) const -{ - return _symbolicLbOfLb[neuron]; -} - -double Layer::getSymbolicUbOfLb( unsigned neuron ) const -{ - return _symbolicUbOfLb[neuron]; -} - -double Layer::getSymbolicLbOfUb( unsigned neuron ) const -{ - return _symbolicLbOfUb[neuron]; -} - -double Layer::getSymbolicUbOfUb( unsigned neuron ) const -{ - return _symbolicUbOfUb[neuron]; -} - -void Layer::freeMemoryIfNeeded() -{ - for ( const auto &weights : _layerToWeights ) - delete[] weights.second; - _layerToWeights.clear(); - - for ( const auto &weights : _layerToPositiveWeights ) - delete[] weights.second; - _layerToPositiveWeights.clear(); - - for ( const auto &weights : _layerToNegativeWeights ) - delete[] weights.second; - _layerToNegativeWeights.clear(); - - if ( _bias ) - { - delete[] _bias; - _bias = NULL; - } - - if ( _assignment ) - { - delete[] _assignment; - _assignment = NULL; - } - - if ( _lb ) - { - delete[] _lb; - _lb = NULL; - } - - if ( _ub ) - { - delete[] _ub; - _ub = NULL; - } - - if ( _symbolicLb ) - { - delete[] _symbolicLb; - _symbolicLb = NULL; - } - - if ( _symbolicUb ) - { - delete[] _symbolicUb; - _symbolicUb = NULL; - } - - if ( _symbolicLowerBias ) - { - delete[] _symbolicLowerBias; - _symbolicLowerBias = NULL; - } - - if ( _symbolicUpperBias ) - { - delete[] _symbolicUpperBias; - _symbolicUpperBias = NULL; - } - - if ( _symbolicLbOfLb ) - { - delete[] _symbolicLbOfLb; - _symbolicLbOfLb = NULL; - } - - if ( _symbolicUbOfLb ) - { - delete[] _symbolicUbOfLb; - _symbolicUbOfLb = NULL; - } - - if ( _symbolicLbOfUb ) - { - delete[] _symbolicLbOfUb; - _symbolicLbOfUb = NULL; - } - - if ( _symbolicUbOfUb ) - { - delete[] _symbolicUbOfUb; - _symbolicUbOfUb = NULL; - } -} - -String Layer::typeToString( Type type ) -{ - switch ( type ) - { - case INPUT: - return "INPUT"; - break; - - case WEIGHTED_SUM: - return "WEIGHTED_SUM"; - break; - - case RELU: - return "RELU"; - break; - - case LEAKY_RELU: - return "LEAKY_RELU"; - break; - - case SIGMOID: - return "SIGMOID"; - break; - - case ABSOLUTE_VALUE: - return "ABSOLUTE_VALUE"; - break; - - case MAX: - return "MAX"; - break; - - case SIGN: - return "SIGN"; - break; - - case ROUND: - return "ROUND"; - break; - - case SOFTMAX: - return "SOFTMAX"; - break; - - case BILINEAR: - return "BILINEAR"; - break; - - default: - return "UNKNOWN TYPE"; - break; - } -} - -void Layer::dump() const -{ - printf( "\nDumping info for layer %u (%s):\n", _layerIndex, typeToString( _type ).ascii() ); - printf( "\tNeurons:\n" ); - - switch ( _type ) - { - case INPUT: - printf( "\t\t" ); - for 
( unsigned i = 0; i < _size; ++i ) - printf( "x%u ", _neuronToVariable[i] ); - - printf( "\n\n" ); - break; - - case WEIGHTED_SUM: - - for ( unsigned i = 0; i < _size; ++i ) - { - if ( _eliminatedNeurons.exists( i ) ) - { - printf( "\t\tNeuron %u = %+.4lf\t[ELIMINATED]\n", i, _eliminatedNeurons[i] ); - continue; - } - - printf( "\t\tx%u = %+.4lf\n\t\t\t", _neuronToVariable[i], _bias[i] ); - for ( const auto &sourceLayerEntry : _sourceLayers ) - { - const Layer *sourceLayer = _layerOwner->getLayer( sourceLayerEntry.first ); - for ( unsigned j = 0; j < sourceLayer->getSize(); ++j ) - { - double weight = _layerToWeights[sourceLayerEntry.first][j * _size + i]; - if ( !FloatUtils::isZero( weight ) ) - { - if ( sourceLayer->_neuronToVariable.exists( j ) ) - printf( "%+.5lfx%u ", weight, sourceLayer->_neuronToVariable[j] ); - else - printf( "%+.5lf", weight * sourceLayer->_eliminatedNeurons[j] ); - } - } - } - printf( "\n" ); - } - - printf( "\n" ); - break; - - case RELU: - case ROUND: - case LEAKY_RELU: - case ABSOLUTE_VALUE: - case MAX: - case SIGN: - case SIGMOID: - case BILINEAR: - case SOFTMAX: - for ( unsigned i = 0; i < _size; ++i ) - { - if ( _eliminatedNeurons.exists( i ) ) - { - printf( "\t\tNeuron %u = %+.4lf\t[ELIMINATED]\n", i, _eliminatedNeurons[i] ); - continue; - } - - printf( "\t\tSources for x%u: ", _neuronToVariable[i] ); - for ( const auto &source : _neuronToActivationSources[i] ) - { - const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); - if ( sourceLayer->_neuronToVariable.exists( source._neuron ) ) - printf( "x%u ", sourceLayer->_neuronToVariable[source._neuron] ); - else - printf( "Neuron_%u (eliminated)", source._neuron ); - } - - printf( "\n" ); - } - - printf( "\n" ); - break; - } -} - -bool Layer::neuronHasVariable( unsigned neuron ) const -{ - ASSERT( _neuronToVariable.exists( neuron ) || _eliminatedNeurons.exists( neuron ) ); - return _neuronToVariable.exists( neuron ); -} - -unsigned Layer::neuronToVariable( unsigned neuron ) const -{ - DEBUG( { - if ( !_neuronToVariable.exists( neuron ) ) - ASSERT( _eliminatedNeurons.exists( neuron ) ); - } ) - - ASSERT( _neuronToVariable.exists( neuron ) ); - return _neuronToVariable[neuron]; -} - -unsigned Layer::variableToNeuron( unsigned variable ) const -{ - ASSERT( _variableToNeuron.exists( variable ) ); - return _variableToNeuron[variable]; -} - -bool Layer::neuronEliminated( unsigned neuron ) const -{ - return _eliminatedNeurons.exists( neuron ); -} - -double Layer::getEliminatedNeuronValue( unsigned neuron ) const -{ - ASSERT( _eliminatedNeurons.exists( neuron ) ); - return _eliminatedNeurons[neuron]; -} - -void Layer::reduceIndexFromAllMaps( unsigned startIndex ) -{ - // Adjust the source layers - Map copyOfSources = _sourceLayers; - _sourceLayers.clear(); - for ( const auto &pair : copyOfSources ) - _sourceLayers[pair.first >= startIndex ? pair.first - 1 : pair.first] = pair.second; - - // Adjust all weight maps - adjustWeightMapIndexing( _layerToWeights, startIndex ); - adjustWeightMapIndexing( _layerToPositiveWeights, startIndex ); - adjustWeightMapIndexing( _layerToNegativeWeights, startIndex ); - - // Adjust the neuron activations - for ( auto &neuronToSources : _neuronToActivationSources ) - { - for ( auto &source : neuronToSources.second ) - { - if ( source._layer >= startIndex ) - --source._layer; - } - } -} - -void Layer::adjustWeightMapIndexing( Map &map, unsigned startIndex ) -{ - Map copyOfWeights = map; - map.clear(); - for ( const auto &pair : copyOfWeights ) - map[pair.first >= startIndex ? 
pair.first - 1 : pair.first] = pair.second;
-}
-
-void Layer::reduceIndexAfterMerge( unsigned startIndex )
-{
-    if ( _layerIndex >= startIndex )
-        --_layerIndex;
-}
-
-bool Layer::operator==( const Layer &layer ) const
-{
-    if ( _layerIndex != layer._layerIndex )
-        return false;
-
-    if ( _type != layer._type )
-        return false;
-
-    if ( _size != layer._size )
-        return false;
-
-    if ( _inputLayerSize != layer._inputLayerSize )
-        return false;
-
-    if ( ( _bias && !layer._bias ) || ( !_bias && layer._bias ) )
-        return false;
-
-    if ( _bias && layer._bias )
-    {
-        if ( std::memcmp( _bias, layer._bias, _size * sizeof( double ) ) != 0 )
-            return false;
-    }
-
-    if ( _sourceLayers != layer._sourceLayers )
-        return false;
-
-    if ( !compareWeights( _layerToWeights, layer._layerToWeights ) )
-        return false;
-
-    if ( !compareWeights( _layerToPositiveWeights, layer._layerToPositiveWeights ) )
-        return false;
-
-    if ( !compareWeights( _layerToNegativeWeights, layer._layerToNegativeWeights ) )
-        return false;
-
-    return true;
-}
-
-bool Layer::compareWeights( const Map<unsigned, double *> &map,
-                            const Map<unsigned, double *> &mapOfOtherLayer ) const
-{
-    if ( map.size() != mapOfOtherLayer.size() )
-        return false;
-
-    for ( const auto &pair : map )
-    {
-        unsigned key = pair.first;
-        double *value = pair.second;
-
-        if ( !mapOfOtherLayer.exists( key ) )
-            return false;
-
-        if ( std::memcmp(
-                 value, mapOfOtherLayer[key], _size * _sourceLayers[key] * sizeof( double ) ) != 0 )
-        {
-            return false;
-        }
-    }
-
-    return true;
-}
-
-unsigned Layer::getMaxVariable() const
-{
-    unsigned result = 0;
-    for ( const auto &pair : _neuronToVariable )
-        if ( pair.second > result )
-            result = pair.second;
-
-    return result;
-}
-
-void Layer::dumpBounds() const
-{
-    printf( "Layer %u:\n", _layerIndex );
-    for ( unsigned i = 0; i < _size; ++i )
-    {
-        printf( "\tNeuron%u\tLB: %.4f, UB: %.4f \n", i + 1, _lb[i], _ub[i] );
-    }
-    printf( "\n" );
-}
-
-} // namespace NLR

From 0eef08c6375428a61db31ed60ca083ecc3ab8864 Mon Sep 17 00:00:00 2001
From: ido-shm-uel
Date: Thu, 16 Oct 2025 20:51:49 +0300
Subject: [PATCH 77/81] Manually Sync Fork: Towards Proof Minimization (#889),
 plus: 1. Various corrections in generatePolygonalTightening, Extended
 INVPROP and hyperparameters, initializeSymbolicBounds (deepPoly module),
 Gradient-based PMNR heuristic, unit tests. 2. Implemented calculations of
 branch symbolic bounds in NLR. 3. Implemented branching after Extended
 INVPROP to derive stronger bounds. 4. Implemented detection of some
 infeasible branch combinations during INVPROP. 5. Implemented heuristic
 generation of branching points and tightening loss. 6. Wrote unit tests for
 parameterised SBT/DeepPoly in Test_NetworkLevelReasoner/Test_DeepPolyAnalysis.h.
 7. Implemented BBPS-based heuristic for PMNR. 8. Wrote Test_PMNRSelection.h,
 unit test file for symbolic bound maps, branching points and PMNR neuron
 scores. 9. Wrote Test_PMNR.h, unit test file for backward-invprop,
 backward-pmnr-(random/gradient/bbps) bound tightening options.
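A note on item 1: the INVPROP hyperparameters adjusted by this patch (step size,
learning rate, weight decay, iteration cap; see the GlobalConfiguration changes
below) control a projected-gradient search over relaxation parameters constrained
to [0, 1]. A rough sketch of the kind of update loop such hyperparameters drive
follows; the gradient oracle here is a placeholder, not Marabou's actual API.

    #include <algorithm>

    // Placeholder for the gradient of the bound-tightening objective w.r.t. alpha
    double estimateObjectiveGradient( double alpha );

    void optimiseRelaxationParameter( double &alpha,
                                      unsigned maxIterations,
                                      double learningRate,
                                      double weightDecay )
    {
        for ( unsigned i = 0; i < maxIterations; ++i )
        {
            double step =
                learningRate * ( estimateObjectiveGradient( alpha ) - weightDecay * alpha );
            // Project back onto the admissible interval [0, 1]
            alpha = std::min( 1.0, std::max( 0.0, alpha + step ) );
        }
    }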
--- CHANGELOG.md | 3 + maraboupy/MarabouCore.cpp | 8 +- src/common/Pair.h | 5 + src/common/Statistics.cpp | 25 +- src/common/Statistics.h | 9 +- src/configuration/GlobalConfiguration.cpp | 24 +- src/configuration/GlobalConfiguration.h | 15 +- src/engine/AbsoluteValueConstraint.cpp | 54 +- src/engine/BoundManager.cpp | 26 +- src/engine/BoundManager.h | 4 +- ...{CDSmtCore.cpp => CDSearchTreeHandler.cpp} | 91 +- .../{CDSmtCore.h => CDSearchTreeHandler.h} | 39 +- src/engine/CMakeLists.txt | 6 +- src/engine/DnCWorker.cpp | 23 +- src/engine/Engine.cpp | 165 +- src/engine/Engine.h | 98 +- src/engine/EngineState.h | 2 +- src/engine/IBoundManager.h | 17 +- src/engine/IEngine.h | 20 +- src/engine/LeakyReluConstraint.cpp | 76 +- src/engine/MaxConstraint.cpp | 22 +- src/engine/MaxConstraint.h | 2 +- src/engine/PiecewiseLinearConstraint.cpp | 1 + src/engine/PiecewiseLinearConstraint.h | 6 +- src/engine/PolygonalTightening.h | 32 +- src/engine/PrecisionRestorer.cpp | 25 +- src/engine/PrecisionRestorer.h | 4 +- src/engine/ReluConstraint.cpp | 74 +- .../{SmtCore.cpp => SearchTreeHandler.cpp} | 93 +- src/engine/{SmtCore.h => SearchTreeHandler.h} | 51 +- ...SmtStackEntry.h => SearchTreeStackEntry.h} | 16 +- src/engine/{SmtState.h => SearchTreeState.h} | 16 +- src/engine/SignConstraint.cpp | 35 +- src/engine/SubQuery.h | 4 +- src/engine/Tableau.cpp | 23 +- src/engine/tests/MockBoundManager.h | 4 +- src/engine/tests/MockEngine.h | 40 +- ...est_SmtCore.h => Test_SearchTreeHandler.h} | 152 +- src/nlr/CMakeLists.txt | 2 +- src/nlr/DeepPolyAbsoluteValueElement.cpp | 5 +- src/nlr/DeepPolyBilinearElement.cpp | 15 +- src/nlr/DeepPolyElement.cpp | 579 +- src/nlr/DeepPolyLeakyReLUElement.cpp | 6 +- src/nlr/DeepPolyMaxPoolElement.cpp | 2 +- src/nlr/DeepPolyReLUElement.cpp | 5 +- src/nlr/DeepPolyRoundElement.cpp | 5 +- src/nlr/DeepPolySigmoidElement.cpp | 5 +- src/nlr/DeepPolySignElement.cpp | 5 +- src/nlr/DeepPolySoftmaxElement.cpp | 15 +- src/nlr/DeepPolyWeightedSumElement.cpp | 1 - src/nlr/LPFormulator.cpp | 53 +- src/nlr/LPFormulator.h | 3 +- src/nlr/Layer.cpp | 436 +- src/nlr/Layer.h | 18 +- src/nlr/LayerOwner.h | 2 +- src/nlr/NLRError.h | 2 +- src/nlr/NetworkLevelReasoner.cpp | 2942 +++- src/nlr/NetworkLevelReasoner.h | 346 +- src/nlr/tests/Test_DeepPolyAnalysis.h | 5825 +++++++- src/nlr/tests/Test_LPRelaxation.h | 144 +- src/nlr/tests/Test_NetworkLevelReasoner.h | 2652 +--- src/nlr/tests/Test_PMNR.h | 11529 ++-------------- src/nlr/tests/Test_PMNRSelection.h | 3747 +++-- src/proofs/Checker.cpp | 6 + src/proofs/PlcLemma.cpp | 22 +- src/proofs/PlcLemma.h | 10 +- src/proofs/UnsatCertificateNode.cpp | 8 + src/proofs/UnsatCertificateNode.h | 5 + src/proofs/UnsatCertificateUtils.cpp | 88 +- src/proofs/UnsatCertificateUtils.h | 17 + src/proofs/tests/Test_UnsatCertificateNode.h | 6 +- 71 files changed, 14128 insertions(+), 15688 deletions(-) rename src/engine/{CDSmtCore.cpp => CDSearchTreeHandler.cpp} (79%) rename src/engine/{CDSmtCore.h => CDSearchTreeHandler.h} (86%) rename src/engine/{SmtCore.cpp => SearchTreeHandler.cpp} (86%) rename src/engine/{SmtCore.h => SearchTreeHandler.h} (79%) rename src/engine/{SmtStackEntry.h => SearchTreeStackEntry.h} (80%) rename src/engine/{SmtState.h => SearchTreeState.h} (82%) rename src/engine/tests/{Test_SmtCore.h => Test_SearchTreeHandler.h} (75%) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7f6fda64c2..16a990d354 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,9 @@ - Implemented backward analysis using preimage-approximation algorithm for `Relu`, `LeakyRelu`, `Sign` and 
`Bilinear` Layers. - Added the BaBSR heuristic as a new branching strategy for ReLU Splitting - Support Sub of two variables, "Mul" of two constants, Slice, and ConstantOfShape in the python onnx parser + - Implemented backward analysis using INVPROP algorithm with added support for all activation functions. + - Implemented backward analysis using partial multi-neuron relaxation with BBPS-based heuristic for neuron selection. + - Renamed SmtCore module to SearchTreeHandler ## Version 2.0.0 diff --git a/maraboupy/MarabouCore.cpp b/maraboupy/MarabouCore.cpp index 8d35646b13..3bbfc9c51b 100644 --- a/maraboupy/MarabouCore.cpp +++ b/maraboupy/MarabouCore.cpp @@ -833,8 +833,8 @@ PYBIND11_MODULE( MarabouCore, m ) .value( "NUM_POPS", Statistics::StatisticsUnsignedAttribute::NUM_POPS ) .value( "CURRENT_DECISION_LEVEL", Statistics::StatisticsUnsignedAttribute::CURRENT_DECISION_LEVEL ) - .value( "NUM_PL_SMT_ORIGINATED_SPLITS", - Statistics::StatisticsUnsignedAttribute::NUM_PL_SMT_ORIGINATED_SPLITS ) + .value( "NUM_PL_SEARCH_TREE_ORIGINATED_SPLITS", + Statistics::StatisticsUnsignedAttribute::NUM_PL_SEARCH_TREE_ORIGINATED_SPLITS ) .value( "NUM_VISITED_TREE_STATES", Statistics::StatisticsUnsignedAttribute::NUM_VISITED_TREE_STATES ) .value( "PP_NUM_TIGHTENING_ITERATIONS", @@ -862,8 +862,8 @@ PYBIND11_MODULE( MarabouCore, m ) py::enum_( m, "StatisticsLongAttribute" ) .value( "NUM_TIGHTENINGS_FROM_EXPLICIT_BASIS", Statistics::StatisticsLongAttribute::NUM_TIGHTENINGS_FROM_EXPLICIT_BASIS ) - .value( "TOTAL_TIME_SMT_CORE_MICRO", - Statistics::StatisticsLongAttribute::TOTAL_TIME_SMT_CORE_MICRO ) + .value( "TOTAL_TIME_SEARCH_TREE_HANDLER_MICRO", + Statistics::StatisticsLongAttribute::TOTAL_TIME_SEARCH_TREE_HANDLER_MICRO ) .value( "TOTAL_TIME_PERFORMING_VALID_CASE_SPLITS_MICRO", Statistics::StatisticsLongAttribute::TOTAL_TIME_PERFORMING_VALID_CASE_SPLITS_MICRO ) .value( "PREPROCESSING_TIME_MICRO", diff --git a/src/common/Pair.h b/src/common/Pair.h index 7cf7d46b5e..f4ca7a9d43 100644 --- a/src/common/Pair.h +++ b/src/common/Pair.h @@ -33,6 +33,11 @@ template class Pair { } + Pair( const Pair &other ) + : _container( other._container ) + { + } + L &first() { return _container.first; diff --git a/src/common/Statistics.cpp b/src/common/Statistics.cpp index b9f7e72e52..9202a9a759 100644 --- a/src/common/Statistics.cpp +++ b/src/common/Statistics.cpp @@ -24,7 +24,7 @@ Statistics::Statistics() _unsignedAttributes[NUM_PL_CONSTRAINTS] = 0; _unsignedAttributes[NUM_ACTIVE_PL_CONSTRAINTS] = 0; _unsignedAttributes[NUM_PL_VALID_SPLITS] = 0; - _unsignedAttributes[NUM_PL_SMT_ORIGINATED_SPLITS] = 0; + _unsignedAttributes[NUM_PL_SEARCH_TREE_ORIGINATED_SPLITS] = 0; _unsignedAttributes[NUM_PRECISION_RESTORATIONS] = 0; _unsignedAttributes[CURRENT_DECISION_LEVEL] = 0; _unsignedAttributes[MAX_DECISION_LEVEL] = 0; @@ -43,6 +43,7 @@ Statistics::Statistics() _unsignedAttributes[NUM_CERTIFIED_LEAVES] = 0; _unsignedAttributes[NUM_DELEGATED_LEAVES] = 0; _unsignedAttributes[NUM_LEMMAS] = 0; + _unsignedAttributes[NUM_LEMMAS_USED] = 0; _unsignedAttributes[CERTIFIED_UNSAT] = 0; _longAttributes[NUM_MAIN_LOOP_ITERATIONS] = 0; @@ -82,7 +83,7 @@ Statistics::Statistics() _longAttributes[TOTAL_TIME_PRECISION_RESTORATION] = 0; _longAttributes[TOTAL_TIME_CONSTRAINT_MATRIX_BOUND_TIGHTENING_MICRO] = 0; _longAttributes[TOTAL_TIME_APPLYING_STORED_TIGHTENINGS_MICRO] = 0; - _longAttributes[TOTAL_TIME_SMT_CORE_MICRO] = 0; + _longAttributes[TOTAL_TIME_SEARCH_TREE_HANDLER_MICRO] = 0; _longAttributes[TOTAL_TIME_UPDATING_SOI_PHASE_PATTERN_MICRO] = 0; 
_longAttributes[NUM_PROPOSED_PHASE_PATTERN_UPDATE] = 0; _longAttributes[NUM_ACCEPTED_PHASE_PATTERN_UPDATE] = 0; @@ -208,11 +209,11 @@ void Statistics::print() printf( "\t\t[%.2lf%%] Applying stored bound-tightening: %llu milli\n", printPercents( totalTimeApplyingStoredTighteningsMicro, timeMainLoopMicro ), totalTimeApplyingStoredTighteningsMicro / 1000 ); - unsigned long long totalTimeSmtCoreMicro = - getLongAttribute( Statistics::TOTAL_TIME_SMT_CORE_MICRO ); - printf( "\t\t[%.2lf%%] SMT core: %llu milli\n", - printPercents( totalTimeSmtCoreMicro, timeMainLoopMicro ), - totalTimeSmtCoreMicro / 1000 ); + unsigned long long totalTimeSearchTreeMicro = + getLongAttribute( Statistics::TOTAL_TIME_SEARCH_TREE_HANDLER_MICRO ); + printf( "\t\t[%.2lf%%] Search Tree Handler : %llu milli\n", + printPercents( totalTimeSearchTreeMicro, timeMainLoopMicro ), + totalTimeSearchTreeMicro / 1000 ); unsigned long long totalTimePerformingSymbolicBoundTightening = getLongAttribute( Statistics::TOTAL_TIME_PERFORMING_SYMBOLIC_BOUND_TIGHTENING ); printf( "\t\t[%.2lf%%] Symbolic Bound Tightening: %llu milli\n", @@ -234,7 +235,7 @@ void Statistics::print() totalTimePerformingValidCaseSplitsMicro + totalTimeHandlingStatisticsMicro + totalTimeExplicitBasisBoundTighteningMicro + totalTimeDegradationChecking + totalTimePrecisionRestoration + totalTimeConstraintMatrixBoundTighteningMicro + - totalTimeApplyingStoredTighteningsMicro + totalTimeSmtCoreMicro + + totalTimeApplyingStoredTighteningsMicro + totalTimeSearchTreeMicro + totalTimePerformingSymbolicBoundTightening; printf( "\t\t[%.2lf%%] Unaccounted for: %llu milli\n", @@ -270,11 +271,11 @@ void Statistics::print() printAverage( timeConstraintFixingStepsMicro / 1000, numConstraintFixingSteps ) ); printf( "\tNumber of active piecewise-linear constraints: %u / %u\n" "\t\tConstraints disabled by valid splits: %u. " - "By SMT-originated splits: %u\n", + "By Search-Tree--originated splits: %u\n", getUnsignedAttribute( Statistics::NUM_ACTIVE_PL_CONSTRAINTS ), getUnsignedAttribute( Statistics::NUM_PL_CONSTRAINTS ), getUnsignedAttribute( Statistics::NUM_PL_VALID_SPLITS ), - getUnsignedAttribute( Statistics::NUM_PL_SMT_ORIGINATED_SPLITS ) ); + getUnsignedAttribute( Statistics::NUM_PL_SEARCH_TREE_ORIGINATED_SPLITS ) ); printf( "\tLast reported degradation: %.10lf. Max degradation so far: %.10lf. " "Restorations so far: %u\n", getDoubleAttribute( Statistics::CURRENT_DEGRADATION ), @@ -314,7 +315,7 @@ void Statistics::print() getUnsignedAttribute( Statistics::CURRENT_TABLEAU_M ), getUnsignedAttribute( Statistics::CURRENT_TABLEAU_N ) ); - printf( "\t--- SMT Core Statistics ---\n" ); + printf( "\t--- Search Tree Handler Statistics ---\n" ); printf( "\tTotal depth is %u. Total visited states: %u. Number of splits: %u. 
Number of pops: %u\n", getUnsignedAttribute( Statistics::CURRENT_DECISION_LEVEL ), @@ -428,6 +429,8 @@ void Statistics::print() printf( "\tNumber of leaves to delegate: %u\n", getUnsignedAttribute( Statistics::NUM_DELEGATED_LEAVES ) ); printf( "\tNumber of lemmas: %u\n", getUnsignedAttribute( Statistics::NUM_LEMMAS ) ); + printf( "\tNumber of lemmas used in proof minimization: %u\n", + getUnsignedAttribute( Statistics::NUM_LEMMAS_USED ) ); } unsigned long long Statistics::getTotalTimeInMicro() const diff --git a/src/common/Statistics.h b/src/common/Statistics.h index 4761594b4e..0454d9d22c 100644 --- a/src/common/Statistics.h +++ b/src/common/Statistics.h @@ -31,12 +31,12 @@ class Statistics NUM_PL_CONSTRAINTS, NUM_ACTIVE_PL_CONSTRAINTS, NUM_PL_VALID_SPLITS, - NUM_PL_SMT_ORIGINATED_SPLITS, + NUM_PL_SEARCH_TREE_ORIGINATED_SPLITS, // Precision restoration NUM_PRECISION_RESTORATIONS, - // Current and max stack depth in the SMT core + // Current and max stack depth in the Search Tree Handler CURRENT_DECISION_LEVEL, MAX_DECISION_LEVEL, @@ -71,6 +71,7 @@ class Statistics NUM_CERTIFIED_LEAVES, NUM_DELEGATED_LEAVES, NUM_LEMMAS, + NUM_LEMMAS_USED, // 1 if returned UNSAT and proof was certified by proof checker, 0 otherwise. CERTIFIED_UNSAT, @@ -188,8 +189,8 @@ class Statistics // Total amount of time spent applying previously stored bound tightenings TOTAL_TIME_APPLYING_STORED_TIGHTENINGS_MICRO, - // Total amount of time spent within the SMT core - TOTAL_TIME_SMT_CORE_MICRO, + // Total amount of time spent within the search tree core + TOTAL_TIME_SEARCH_TREE_HANDLER_MICRO, // Total time heuristically updating the SoI phase pattern TOTAL_TIME_UPDATING_SOI_PHASE_PATTERN_MICRO, diff --git a/src/configuration/GlobalConfiguration.cpp b/src/configuration/GlobalConfiguration.cpp index bfd4304643..aa5429355e 100644 --- a/src/configuration/GlobalConfiguration.cpp +++ b/src/configuration/GlobalConfiguration.cpp @@ -62,7 +62,7 @@ const unsigned GlobalConfiguration::MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS = 5; const DivideStrategy GlobalConfiguration::SPLITTING_HEURISTICS = DivideStrategy::ReLUViolation; const unsigned GlobalConfiguration::INTERVAL_SPLITTING_FREQUENCY = 10; const unsigned GlobalConfiguration::INTERVAL_SPLITTING_THRESHOLD = 10; -const unsigned GlobalConfiguration::BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY = 100; +const unsigned GlobalConfiguration::BOUND_TIGHTENING_ON_CONSTRAINT_MATRIX_FREQUENCY = 100; const unsigned GlobalConfiguration::ROW_BOUND_TIGHTENER_SATURATION_ITERATIONS = 20; const double GlobalConfiguration::COST_FUNCTION_ERROR_THRESHOLD = 0.0000000001; @@ -76,16 +76,17 @@ const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE = 0.25; const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_WEIGHT_DECAY = 0; -const unsigned GlobalConfiguration::INVPROP_MAX_ITERATIONS = 25000; -const double GlobalConfiguration::INVPROP_STEP_SIZE = 0.0025; -const double GlobalConfiguration::INVPROP_LEARNING_RATE = 0.25; -const double GlobalConfiguration::INVPROP_WEIGHT_DECAY = 0; +const unsigned GlobalConfiguration::INVPROP_MAX_ITERATIONS = 10; +const double GlobalConfiguration::INVPROP_STEP_SIZE = 0.025; +const double GlobalConfiguration::INVPROP_LEARNING_RATE = 0.005; +const double GlobalConfiguration::INVPROP_WEIGHT_DECAY = 0.5; const double GlobalConfiguration::INVPROP_INITIAL_ALPHA = 0.5; const double GlobalConfiguration::INVPROP_INITIAL_GAMMA = 0.025; const unsigned 
GlobalConfiguration::PMNR_RANDOM_SEED = 1; -const unsigned GlobalConfiguration::PMNR_MAX_ITERATIONS = 100; -const unsigned GlobalConfiguration::PMNR_SELECTED_NEURONS = 3; +const unsigned GlobalConfiguration::PMNR_MAX_ITERATIONS = 1; +const unsigned GlobalConfiguration::PMNR_SELECTED_NEURONS = 2; +const unsigned GlobalConfiguration::PMNR_BBPS_BRANCHING_CANDIDATES = 10; const bool GlobalConfiguration::USE_HARRIS_RATIO_TEST = true; @@ -139,6 +140,9 @@ const unsigned GlobalConfiguration::BACKWARD_BOUND_PROPAGATION_DEPTH = 3; const unsigned GlobalConfiguration::MAX_ROUNDS_OF_BACKWARD_ANALYSIS = 10; const unsigned GlobalConfiguration::MAX_ROUNDS_OF_PMNR_BACKWARD_ANALYSIS = 10; +const bool GlobalConfiguration::ANALYZE_PROOF_DEPENDENCIES = false; +const bool GlobalConfiguration::MINIMIZE_PROOF_DEPENDENCIES = false; + #ifdef ENABLE_GUROBI const unsigned GlobalConfiguration::GUROBI_NUMBER_OF_THREADS = 1; const bool GlobalConfiguration::GUROBI_LOGGING = false; @@ -148,7 +152,7 @@ const bool GlobalConfiguration::GUROBI_LOGGING = false; const bool GlobalConfiguration::DNC_MANAGER_LOGGING = false; const bool GlobalConfiguration::ENGINE_LOGGING = false; const bool GlobalConfiguration::TABLEAU_LOGGING = false; -const bool GlobalConfiguration::SMT_CORE_LOGGING = false; +const bool GlobalConfiguration::SEARCH_TREE_HANDLER_LOGGING = false; const bool GlobalConfiguration::DANTZIGS_RULE_LOGGING = false; const bool GlobalConfiguration::BASIS_FACTORIZATION_LOGGING = false; const bool GlobalConfiguration::PREPROCESSOR_LOGGING = false; @@ -194,8 +198,8 @@ void GlobalConfiguration::print() printf( " GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD: %.15lf\n", GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD ); printf( " MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS: %u\n", MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS ); - printf( " BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY: %u\n", - BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY ); + printf( " BOUND_TIGHTENING_ON_CONSTRAINT_MATRIX_FREQUENCY: %u\n", + BOUND_TIGHTENING_ON_CONSTRAINT_MATRIX_FREQUENCY ); printf( " COST_FUNCTION_ERROR_THRESHOLD: %.15lf\n", COST_FUNCTION_ERROR_THRESHOLD ); printf( " USE_HARRIS_RATIO_TEST: %s\n", USE_HARRIS_RATIO_TEST ? "Yes" : "No" ); diff --git a/src/configuration/GlobalConfiguration.h b/src/configuration/GlobalConfiguration.h index 3efd2cae60..d31c9cc874 100644 --- a/src/configuration/GlobalConfiguration.h +++ b/src/configuration/GlobalConfiguration.h @@ -139,7 +139,7 @@ class GlobalConfiguration static const unsigned INTERVAL_SPLITTING_THRESHOLD; // How often should we perform full bound tightening, on the entire contraints matrix A. - static const unsigned BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY; + static const unsigned BOUND_TIGHTENING_ON_CONSTRAINT_MATRIX_FREQUENCY; // When the row bound tightener is asked to run until saturation, it can enter an infinite loop // due to tiny increments in bounds. This number limits the number of iterations it can perform. @@ -199,6 +199,9 @@ class GlobalConfiguration // Random seed for PMNR (with heuristically selected hyperplanes). static const unsigned PMNR_SELECTED_NEURONS; + // Number of candidates for PMNR-BBPS branching points. + static const unsigned PMNR_BBPS_BRANCHING_CANDIDATES; + // How often should projected steepest edge reset the reference space? 
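A minimal sketch of how a frequency constant such as the renamed BOUND_TIGHTENING_ON_CONSTRAINT_MATRIX_FREQUENCY is consumed: the full constraint-matrix tightening pass fires only on every N-th main-loop iteration, mirroring the modulo check in Engine::tightenBoundsOnConstraintMatrix later in this patch. The standalone helper name here is illustrative, not part of the codebase:

    // Run the full constraint-matrix tightening pass every `frequency`
    // main-loop iterations; skip it otherwise.
    bool shouldTightenOnConstraintMatrix( unsigned long long iteration,
                                          unsigned frequency = 100 )
    {
        return iteration % frequency == 0;
    }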
static const unsigned PSE_ITERATIONS_BEFORE_RESET; @@ -315,6 +318,14 @@ class GlobalConfiguration */ static const unsigned MAX_ROUNDS_OF_PMNR_BACKWARD_ANALYSIS; + /* Analyze lemma dependencies when producing proofs + */ + static const bool ANALYZE_PROOF_DEPENDENCIES; + + /* Minimize the number of lemma dependencies when producing proofs + */ + static const bool MINIMIZE_PROOF_DEPENDENCIES; + #ifdef ENABLE_GUROBI /* The number of threads Gurobi spawns @@ -329,7 +340,7 @@ class GlobalConfiguration static const bool DNC_MANAGER_LOGGING; static const bool ENGINE_LOGGING; static const bool TABLEAU_LOGGING; - static const bool SMT_CORE_LOGGING; + static const bool SEARCH_TREE_HANDLER_LOGGING; static const bool DANTZIGS_RULE_LOGGING; static const bool BASIS_FACTORIZATION_LOGGING; static const bool PREPROCESSOR_LOGGING; diff --git a/src/engine/AbsoluteValueConstraint.cpp b/src/engine/AbsoluteValueConstraint.cpp index a9763980ad..5954716b65 100644 --- a/src/engine/AbsoluteValueConstraint.cpp +++ b/src/engine/AbsoluteValueConstraint.cpp @@ -152,7 +152,9 @@ void AbsoluteValueConstraint::notifyLowerBound( unsigned variable, double bound Tightening::UB, { variable, variable }, Tightening::UB, - getType() ); + *this, + false, + fUpperBound ); else if ( proofs && phaseFixed() ) { std::shared_ptr tighteningRow = @@ -232,7 +234,9 @@ void AbsoluteValueConstraint::notifyUpperBound( unsigned variable, double bound Tightening::UB, { variable, variable }, Tightening::UB, - getType() ); + *this, + false, + fUpperBound ); else if ( proofs && phaseFixed() ) { std::shared_ptr tighteningRow = @@ -406,7 +410,7 @@ AbsoluteValueConstraint::getSmartFixes( ITableau * /* tableau */ ) const List AbsoluteValueConstraint::getCaseSplits() const { - ASSERT( _phaseStatus == PhaseStatus::PHASE_NOT_FIXED ); + ASSERT( getPhaseStatus() == PhaseStatus::PHASE_NOT_FIXED ); List splits; splits.append( getNegativeSplit() ); @@ -476,14 +480,14 @@ PiecewiseLinearCaseSplit AbsoluteValueConstraint::getPositiveSplit() const bool AbsoluteValueConstraint::phaseFixed() const { - return _phaseStatus != PhaseStatus::PHASE_NOT_FIXED; + return getPhaseStatus() != PhaseStatus::PHASE_NOT_FIXED; } PiecewiseLinearCaseSplit AbsoluteValueConstraint::getImpliedCaseSplit() const { - ASSERT( _phaseStatus != PHASE_NOT_FIXED ); + ASSERT( getPhaseStatus() != PHASE_NOT_FIXED ); - if ( _phaseStatus == ABS_PHASE_POSITIVE ) + if ( getPhaseStatus() == ABS_PHASE_POSITIVE ) return getPositiveSplit(); return getNegativeSplit(); @@ -512,8 +516,8 @@ void AbsoluteValueConstraint::dump( String &output ) const _f, _b, _constraintActive ? 
"Yes" : "No", - _phaseStatus, - phaseToString( _phaseStatus ).ascii() ); + getPhaseStatus(), + phaseToString( getPhaseStatus() ).ascii() ); output += Stringf( "b in [%s, %s], ", @@ -826,7 +830,7 @@ void AbsoluteValueConstraint::fixPhaseIfNeeded() setPhaseStatus( ABS_PHASE_POSITIVE ); if ( proofs ) _boundManager->addLemmaExplanationAndTightenBound( - _posAux, 0, Tightening::UB, { _b }, Tightening::LB, getType() ); + _posAux, 0, Tightening::UB, { _b }, Tightening::LB, *this, true, 0 ); return; } @@ -836,7 +840,7 @@ void AbsoluteValueConstraint::fixPhaseIfNeeded() setPhaseStatus( ABS_PHASE_NEGATIVE ); if ( proofs ) _boundManager->addLemmaExplanationAndTightenBound( - _negAux, 0, Tightening::UB, { _b }, Tightening::UB, getType() ); + _negAux, 0, Tightening::UB, { _b }, Tightening::UB, *this, true, 0 ); return; } @@ -849,8 +853,14 @@ void AbsoluteValueConstraint::fixPhaseIfNeeded() { setPhaseStatus( ABS_PHASE_NEGATIVE ); if ( proofs ) - _boundManager->addLemmaExplanationAndTightenBound( - _negAux, 0, Tightening::UB, { _b, _f }, Tightening::UB, getType() ); + _boundManager->addLemmaExplanationAndTightenBound( _negAux, + 0, + Tightening::UB, + { _b, _f }, + Tightening::UB, + *this, + true, + getUpperBound( _b ) ); return; } @@ -860,8 +870,14 @@ void AbsoluteValueConstraint::fixPhaseIfNeeded() { setPhaseStatus( ABS_PHASE_POSITIVE ); if ( proofs ) - _boundManager->addLemmaExplanationAndTightenBound( - _posAux, 0, Tightening::UB, { _b, _f }, Tightening::LB, getType() ); + _boundManager->addLemmaExplanationAndTightenBound( _posAux, + 0, + Tightening::UB, + { _b, _f }, + Tightening::LB, + *this, + true, + -getLowerBound( _b ) ); return; } @@ -871,6 +887,9 @@ void AbsoluteValueConstraint::fixPhaseIfNeeded() if ( existsUpperBound( _posAux ) && FloatUtils::isZero( getUpperBound( _posAux ) ) ) { setPhaseStatus( ABS_PHASE_POSITIVE ); + if ( proofs ) + _boundManager->addLemmaExplanationAndTightenBound( + _posAux, 0, Tightening::UB, { _posAux }, Tightening::UB, *this, true, 0 ); return; } @@ -880,7 +899,7 @@ void AbsoluteValueConstraint::fixPhaseIfNeeded() setPhaseStatus( ABS_PHASE_NEGATIVE ); if ( proofs ) _boundManager->addLemmaExplanationAndTightenBound( - _negAux, 0, Tightening::UB, { _posAux }, Tightening::LB, getType() ); + _negAux, 0, Tightening::UB, { _posAux }, Tightening::LB, *this, true, 0 ); return; } @@ -888,6 +907,9 @@ void AbsoluteValueConstraint::fixPhaseIfNeeded() if ( existsUpperBound( _negAux ) && FloatUtils::isZero( getUpperBound( _negAux ) ) ) { setPhaseStatus( ABS_PHASE_NEGATIVE ); + if ( proofs ) + _boundManager->addLemmaExplanationAndTightenBound( + _negAux, 0, Tightening::UB, { _negAux }, Tightening::UB, *this, true, 0 ); return; } @@ -897,7 +919,7 @@ void AbsoluteValueConstraint::fixPhaseIfNeeded() setPhaseStatus( ABS_PHASE_POSITIVE ); if ( proofs ) _boundManager->addLemmaExplanationAndTightenBound( - _posAux, 0, Tightening::UB, { _negAux }, Tightening::LB, getType() ); + _posAux, 0, Tightening::UB, { _negAux }, Tightening::LB, *this, true, 0 ); return; } } diff --git a/src/engine/BoundManager.cpp b/src/engine/BoundManager.cpp index 5c8c9d1562..3fd03cd992 100644 --- a/src/engine/BoundManager.cpp +++ b/src/engine/BoundManager.cpp @@ -17,7 +17,6 @@ #include "Debug.h" #include "FloatUtils.h" -#include "InfeasibleQueryException.h" #include "MarabouError.h" #include "Tableau.h" #include "Tightening.h" @@ -417,7 +416,9 @@ bool BoundManager::addLemmaExplanationAndTightenBound( unsigned var, Tightening::BoundType affectedVarBound, const List &causingVars, Tightening::BoundType 
causingVarBound, - PiecewiseLinearFunctionType constraintType ) + PiecewiseLinearConstraint &constraint, + bool /*isPhaseFixing*/, + double minTargetBound ) { if ( !shouldProduceProofs() ) return false; @@ -432,12 +433,13 @@ bool BoundManager::addLemmaExplanationAndTightenBound( unsigned var, if ( tightened ) { - if ( constraintType == RELU || constraintType == SIGN || constraintType == LEAKY_RELU ) + if ( constraint.getType() == RELU || constraint.getType() == SIGN || + constraint.getType() == LEAKY_RELU ) { ASSERT( causingVars.size() == 1 ); allExplanations.append( getExplanation( causingVars.front(), causingVarBound ) ); } - else if ( constraintType == ABSOLUTE_VALUE ) + else if ( constraint.getType() == ABSOLUTE_VALUE ) { if ( causingVars.size() == 1 ) allExplanations.append( getExplanation( causingVars.front(), causingVarBound ) ); @@ -456,7 +458,7 @@ bool BoundManager::addLemmaExplanationAndTightenBound( unsigned var, allExplanations.append( getExplanation( causingVars.back(), Tightening::LB ) ); } } - else if ( constraintType == MAX ) + else if ( constraint.getType() == MAX ) for ( const auto &element : causingVars ) allExplanations.append( getExplanation( element, Tightening::UB ) ); else @@ -468,12 +470,22 @@ bool BoundManager::addLemmaExplanationAndTightenBound( unsigned var, causingVarBound, affectedVarBound, allExplanations, - constraintType ); - _engine->addPLCLemma( PLCExpl ); + constraint.getType(), + minTargetBound ); + + + _engine->getUNSATCertificateCurrentPointer()->addPLCLemma( PLCExpl ); affectedVarBound == Tightening::UB ? _engine->updateGroundUpperBound( var, value ) : _engine->updateGroundLowerBound( var, value ); + + PLCExpl->setToCheck(); + + // Add ground bound entry to the GroundBoundManager resetExplanation( var, affectedVarBound ); + + _engine->incNumOfLemmas(); } + return true; } diff --git a/src/engine/BoundManager.h b/src/engine/BoundManager.h index 2808e1bbfc..cf04ba05dd 100644 --- a/src/engine/BoundManager.h +++ b/src/engine/BoundManager.h @@ -260,7 +260,9 @@ class BoundManager : public IBoundManager Tightening::BoundType affectedVarBound, const List &causingVars, Tightening::BoundType causingVarBound, - PiecewiseLinearFunctionType constraintType ); + PiecewiseLinearConstraint &constraint, + bool isPhaseFixing, + double minTargetBound ); /* Explainer of all bounds diff --git a/src/engine/CDSmtCore.cpp b/src/engine/CDSearchTreeHandler.cpp similarity index 79% rename from src/engine/CDSmtCore.cpp rename to src/engine/CDSearchTreeHandler.cpp index 6fb9967fed..4287001ccf 100644 --- a/src/engine/CDSmtCore.cpp +++ b/src/engine/CDSearchTreeHandler.cpp @@ -1,5 +1,5 @@ /********************* */ -/*! \file CDSmtCore.cpp +/*! \file CDSearchTreeHandler.cpp ** \verbatim ** Top contributors (to current version): ** Guy Katz, Aleksandar Zeljic, Haoze Wu, Parth Shah @@ -9,10 +9,10 @@ ** All rights reserved. See the file COPYING in the top-level source ** directory for licensing information.\endverbatim ** - ** See the description of the class in CDSmtCore.h. + ** See the description of the class in CDSearchTreeHandler.h. 
**/ -#include "CDSmtCore.h" +#include "CDSearchTreeHandler.h" #include "Debug.h" #include "DivideStrategy.h" @@ -27,7 +27,7 @@ using namespace CVC4::context; -CDSmtCore::CDSmtCore( IEngine *engine, Context &ctx ) +CDSearchTreeHandler::CDSearchTreeHandler( IEngine *engine, Context &ctx ) : _statistics( NULL ) , _context( ctx ) , _trail( &_context ) @@ -43,11 +43,11 @@ CDSmtCore::CDSmtCore( IEngine *engine, Context &ctx ) { } -CDSmtCore::~CDSmtCore() +CDSearchTreeHandler::~CDSearchTreeHandler() { } -void CDSmtCore::reportViolatedConstraint( PiecewiseLinearConstraint *constraint ) +void CDSearchTreeHandler::reportViolatedConstraint( PiecewiseLinearConstraint *constraint ) { ASSERT( !constraint->phaseFixed() ); @@ -68,7 +68,7 @@ void CDSmtCore::reportViolatedConstraint( PiecewiseLinearConstraint *constraint } } -unsigned CDSmtCore::getViolationCounts( PiecewiseLinearConstraint *constraint ) const +unsigned CDSearchTreeHandler::getViolationCounts( PiecewiseLinearConstraint *constraint ) const { if ( !_constraintToViolationCount.exists( constraint ) ) return 0; @@ -76,7 +76,7 @@ unsigned CDSmtCore::getViolationCounts( PiecewiseLinearConstraint *constraint ) return _constraintToViolationCount[constraint]; } -void CDSmtCore::initializeScoreTrackerIfNeeded( +void CDSearchTreeHandler::initializeScoreTrackerIfNeeded( const List &plConstraints ) { if ( GlobalConfiguration::USE_DEEPSOI_LOCAL_SEARCH ) @@ -84,11 +84,11 @@ void CDSmtCore::initializeScoreTrackerIfNeeded( _scoreTracker = std::unique_ptr( new PseudoImpactTracker() ); _scoreTracker->initialize( plConstraints ); - SMT_LOG( "\tTracking Pseudo Impact..." ); + CD_SEARCH_TREE_LOG( "\tTracking Pseudo Impact..." ); } } -void CDSmtCore::reportRejectedPhasePatternProposal() +void CDSearchTreeHandler::reportRejectedPhasePatternProposal() { ++_numRejectedPhasePatternProposal; @@ -102,29 +102,30 @@ void CDSmtCore::reportRejectedPhasePatternProposal() } } -bool CDSmtCore::needToSplit() const +bool CDSearchTreeHandler::needToSplit() const { return _needToSplit; } -void CDSmtCore::pushDecision( PiecewiseLinearConstraint *constraint, PhaseStatus decision ) +void CDSearchTreeHandler::pushDecision( PiecewiseLinearConstraint *constraint, + PhaseStatus decision ) { - SMT_LOG( Stringf( "Decision @ %d )", _context.getLevel() + 1 ).ascii() ); + CD_SEARCH_TREE_LOG( Stringf( "Decision @ %d )", _context.getLevel() + 1 ).ascii() ); TrailEntry te( constraint, decision ); applyTrailEntry( te, true ); - SMT_LOG( Stringf( "Decision push @ %d DONE", _context.getLevel() ).ascii() ); + CD_SEARCH_TREE_LOG( Stringf( "Decision push @ %d DONE", _context.getLevel() ).ascii() ); } -void CDSmtCore::pushImplication( PiecewiseLinearConstraint *constraint ) +void CDSearchTreeHandler::pushImplication( PiecewiseLinearConstraint *constraint ) { ASSERT( constraint->isImplication() ); - SMT_LOG( Stringf( "Implication @ %d ... ", _context.getLevel() ).ascii() ); + CD_SEARCH_TREE_LOG( Stringf( "Implication @ %d ... 
", _context.getLevel() ).ascii() ); TrailEntry te( constraint, constraint->nextFeasibleCase() ); applyTrailEntry( te, false ); - SMT_LOG( Stringf( "Implication @ %d DONE", _context.getLevel() ).ascii() ); + CD_SEARCH_TREE_LOG( Stringf( "Implication @ %d DONE", _context.getLevel() ).ascii() ); } -void CDSmtCore::applyTrailEntry( TrailEntry &te, bool isDecision ) +void CDSearchTreeHandler::applyTrailEntry( TrailEntry &te, bool isDecision ) { if ( isDecision ) { @@ -136,10 +137,10 @@ void CDSmtCore::applyTrailEntry( TrailEntry &te, bool isDecision ) _engine->applySplit( te.getPiecewiseLinearCaseSplit() ); } -void CDSmtCore::decide() +void CDSearchTreeHandler::decide() { ASSERT( _needToSplit ); - SMT_LOG( "Performing a ReLU split" ); + CD_SEARCH_TREE_LOG( "Performing a ReLU split" ); _numRejectedPhasePatternProposal = 0; // Maybe the constraint has already become inactive - if so, ignore @@ -161,7 +162,7 @@ void CDSmtCore::decide() decideSplit( _constraintForSplitting ); } -void CDSmtCore::decideSplit( PiecewiseLinearConstraint *constraint ) +void CDSearchTreeHandler::decideSplit( PiecewiseLinearConstraint *constraint ) { struct timespec start = TimeUtils::sampleMicro(); @@ -187,51 +188,51 @@ void CDSmtCore::decideSplit( PiecewiseLinearConstraint *constraint ) _statistics->setUnsignedAttribute( Statistics::MAX_DECISION_LEVEL, level ); struct timespec end = TimeUtils::sampleMicro(); - _statistics->incLongAttribute( Statistics::TOTAL_TIME_SMT_CORE_MICRO, + _statistics->incLongAttribute( Statistics::TOTAL_TIME_SEARCH_TREE_HANDLER_MICRO, TimeUtils::timePassed( start, end ) ); } - SMT_LOG( "Performing a ReLU split - DONE" ); + CD_SEARCH_TREE_LOG( "Performing a ReLU split - DONE" ); } -unsigned CDSmtCore::getDecisionLevel() const +unsigned CDSearchTreeHandler::getDecisionLevel() const { return _decisions.size(); } -bool CDSmtCore::popDecisionLevel( TrailEntry &lastDecision ) +bool CDSearchTreeHandler::popDecisionLevel( TrailEntry &lastDecision ) { // ASSERT( static_cast( _context.getLevel() ) == _decisions.size() ); if ( _decisions.empty() ) return false; - SMT_LOG( "Popping trail ..." ); + CD_SEARCH_TREE_LOG( "Popping trail ..." ); lastDecision = _decisions.back(); _context.pop(); _engine->postContextPopHook(); - SMT_LOG( Stringf( "to %d DONE", _context.getLevel() ).ascii() ); + CD_SEARCH_TREE_LOG( Stringf( "to %d DONE", _context.getLevel() ).ascii() ); return true; } -void CDSmtCore::interruptIfCompliantWithDebugSolution() +void CDSearchTreeHandler::interruptIfCompliantWithDebugSolution() { if ( checkSkewFromDebuggingSolution() ) { - SMT_LOG( "Error! Popping from a compliant stack\n" ); + CD_SEARCH_TREE_LOG( "Error! Popping from a compliant stack\n" ); throw MarabouError( MarabouError::DEBUGGING_ERROR ); } } -PiecewiseLinearCaseSplit CDSmtCore::getDecision( unsigned decisionLevel ) const +PiecewiseLinearCaseSplit CDSearchTreeHandler::getDecision( unsigned decisionLevel ) const { ASSERT( decisionLevel <= getDecisionLevel() ); ASSERT( decisionLevel > 0 ); return _decisions[decisionLevel - 1].getPiecewiseLinearCaseSplit(); } -bool CDSmtCore::backtrackToFeasibleDecision( TrailEntry &lastDecision ) +bool CDSearchTreeHandler::backtrackToFeasibleDecision( TrailEntry &lastDecision ) { - SMT_LOG( "Backtracking to a feasible decision..." ); + CD_SEARCH_TREE_LOG( "Backtracking to a feasible decision..." 
); if ( getDecisionLevel() == 0 ) return false; @@ -254,7 +255,7 @@ bool CDSmtCore::backtrackToFeasibleDecision( TrailEntry &lastDecision ) return true; } -bool CDSmtCore::backtrackAndContinueSearch() +bool CDSearchTreeHandler::backtrackAndContinueSearch() { TrailEntry feasibleDecision( nullptr, CONSTRAINT_INFEASIBLE ); struct timespec start = TimeUtils::sampleMicro(); @@ -280,7 +281,7 @@ bool CDSmtCore::backtrackAndContinueSearch() if ( level > _statistics->getUnsignedAttribute( Statistics::MAX_DECISION_LEVEL ) ) _statistics->setUnsignedAttribute( Statistics::MAX_DECISION_LEVEL, level ); struct timespec end = TimeUtils::sampleMicro(); - _statistics->incLongAttribute( Statistics::TOTAL_TIME_SMT_CORE_MICRO, + _statistics->incLongAttribute( Statistics::TOTAL_TIME_SEARCH_TREE_HANDLER_MICRO, TimeUtils::timePassed( start, end ) ); } @@ -288,14 +289,14 @@ bool CDSmtCore::backtrackAndContinueSearch() return true; } -void CDSmtCore::resetReportedViolations() +void CDSearchTreeHandler::resetReportedViolations() { _constraintToViolationCount.clear(); _numRejectedPhasePatternProposal = 0; _needToSplit = false; } -void CDSmtCore::allSplitsSoFar( List &result ) const +void CDSearchTreeHandler::allSplitsSoFar( List &result ) const { result.clear(); @@ -303,19 +304,19 @@ void CDSmtCore::allSplitsSoFar( List &result ) const result.append( trailEntry.getPiecewiseLinearCaseSplit() ); } -void CDSmtCore::setStatistics( Statistics *statistics ) +void CDSearchTreeHandler::setStatistics( Statistics *statistics ) { _statistics = statistics; } -void CDSmtCore::storeDebuggingSolution( const Map &debuggingSolution ) +void CDSearchTreeHandler::storeDebuggingSolution( const Map &debuggingSolution ) { _debuggingSolution = debuggingSolution; } // Return true if stack is currently compliant, false otherwise // If there is no stored solution, return false --- incompliant. -bool CDSmtCore::checkSkewFromDebuggingSolution() +bool CDSearchTreeHandler::checkSkewFromDebuggingSolution() { if ( _debuggingSolution.empty() ) return false; @@ -372,8 +373,8 @@ bool CDSmtCore::checkSkewFromDebuggingSolution() return true; } -bool CDSmtCore::splitAllowsStoredSolution( const PiecewiseLinearCaseSplit &split, - String &error ) const +bool CDSearchTreeHandler::splitAllowsStoredSolution( const PiecewiseLinearCaseSplit &split, + String &error ) const { // False if the split prevents one of the values in the stored solution, true otherwise. 
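The renamed handler keeps its search state on a trail of decisions, and backtracking pops decision levels until a feasible alternative remains. A compact sketch of that discipline, with Marabou's context and constraint types reduced to plain containers (all names here are illustrative):

    #include <vector>

    struct TrailEntry
    {
        int constraintId; // stands in for PiecewiseLinearConstraint *
        int phase;        // stands in for PhaseStatus
    };

    struct TrailSketch
    {
        std::vector<TrailEntry> decisions;

        // A decision opens a new decision level (cf. _context.push()).
        void pushDecision( const TrailEntry &te ) { decisions.push_back( te ); }

        // Remove the most recent decision and report it; returning false at
        // the root means the search space is exhausted (cf. _context.pop()).
        bool popDecisionLevel( TrailEntry &lastDecision )
        {
            if ( decisions.empty() )
                return false;
            lastDecision = decisions.back();
            decisions.pop_back();
            return true;
        }
    };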
            error = "";
@@ -416,12 +417,12 @@ bool CDSmtCore::splitAllowsStoredSolution( const PiecewiseLinearCaseSplit &split
     return true;
 }

-void CDSmtCore::setConstraintViolationThreshold( unsigned threshold )
+void CDSearchTreeHandler::setConstraintViolationThreshold( unsigned threshold )
 {
     _constraintViolationThreshold = threshold;
 }

-PiecewiseLinearConstraint *CDSmtCore::chooseViolatedConstraintForFixing(
+PiecewiseLinearConstraint *CDSearchTreeHandler::chooseViolatedConstraintForFixing(
     List<PiecewiseLinearConstraint *> &_violatedPlConstraints ) const
 {
     ASSERT( !_violatedPlConstraints.empty() );
@@ -455,14 +456,14 @@ PiecewiseLinearConstraint *CDSmtCore::chooseViolatedConstraintForFixing(
     return candidate;
 }

-bool CDSmtCore::pickSplitPLConstraint()
+bool CDSearchTreeHandler::pickSplitPLConstraint()
 {
     if ( _needToSplit )
         _constraintForSplitting = _engine->pickSplitPLConstraint( _branchingHeuristic );
     return _constraintForSplitting != NULL;
 }

-void CDSmtCore::reset()
+void CDSearchTreeHandler::reset()
 {
     _context.popto( 0 );
     _engine->postContextPopHook();
diff --git a/src/engine/CDSmtCore.h b/src/engine/CDSearchTreeHandler.h
similarity index 86%
rename from src/engine/CDSmtCore.h
rename to src/engine/CDSearchTreeHandler.h
index 8cbeebc5c0..2c0d0d97e4 100644
--- a/src/engine/CDSmtCore.h
+++ b/src/engine/CDSearchTreeHandler.h
@@ -1,5 +1,5 @@
 /********************* */
-/*! \file CDSmtCore.h
+/*! \file CDSearchTreeHandler.h
 ** \verbatim
 ** Top contributors (to current version):
 ** Guy Katz, AleksandarZeljic, Haoze Wu, Parth Shah
@@ -9,8 +9,8 @@
 ** All rights reserved. See the file COPYING in the top-level source
 ** directory for licensing information.\endverbatim
 **
- ** The CDSmtCore class implements a context-dependent SmtCore class.
- ** The CDSmtCore distinguishes between: **decisions** and **implications**.
+ ** The CDSearchTreeHandler class implements a context-dependent SearchTreeHandler class.
+ ** The CDSearchTreeHandler distinguishes between: **decisions** and **implications**.
 **
 ** Decision is a case of PiecewiseLinearConstraint asserted on the trail.
 ** A decision is a choice between multiple feasible cases of a
@@ -24,7 +24,7 @@
 ** by exhausting all other cases) performs an implication.
 **
 ** The overall search state is stored in a distributed way:
- ** - CDSmtCore::_trail is the current search state in a chronological order
+ ** - CDSearchTreeHandler::_trail is the current search state in a chronological order
 ** - PiecewiseLinearConstraints' infeasible cases enumerate all the explored
 ** states w.r.t to the chronological order on the _trail.
 **
@@ -34,7 +34,7 @@
 ** _trail and _decisions are both context dependent and will synchronize in
 ** unison with the _context object.
 **
- ** When a search state is found to be infeasible, CDSmtCore backtracks to the
+ ** When a search state is found to be infeasible, CDSearchTreeHandler backtracks to the
 ** last decision and continues the search.
 **
 ** PushDecision advances the decision level and context level.
@@ -63,8 +63,8 @@
 ** - Using BoundManager class to store bounds in a context-dependent manner
 **/

-#ifndef __CDSmtCore_h__
-#define __CDSmtCore_h__
+#ifndef __CDSearchTreeHandler_h__
+#define __CDSearchTreeHandler_h__

 #include "Options.h"
 #include "PLConstraintScoreTracker.h"
@@ -76,17 +76,18 @@
 #include "context/cdlist.h"
 #include "context/context.h"

-#define SMT_LOG( x, ... ) LOG( GlobalConfiguration::SMT_CORE_LOGGING, "CDSmtCore: %s\n", x )
+#define CD_SEARCH_TREE_LOG( x, ...
) \ + LOG( GlobalConfiguration::SEARCH_TREE_HANDLER_LOGGING, "CDSearchTreeHandler: %s\n", x ) class EngineState; class Engine; class String; -class CDSmtCore +class CDSearchTreeHandler { public: - CDSmtCore( IEngine *engine, CVC4::context::Context &context ); - ~CDSmtCore(); + CDSearchTreeHandler( IEngine *engine, CVC4::context::Context &context ); + ~CDSearchTreeHandler(); /* Clear the stack. @@ -99,7 +100,7 @@ class CDSmtCore void initializeScoreTrackerIfNeeded( const List &plConstraints ); /* - Inform the SMT core that a SoI phase pattern proposal is rejected. + Inform the Search Tree Handler that a SoI phase pattern proposal is rejected. */ void reportRejectedPhasePatternProposal(); @@ -121,7 +122,7 @@ class CDSmtCore } /* - Inform the SMT core that a PL constraint is violated. + Inform the Search Tree Handler that a PL constraint is violated. */ void reportViolatedConstraint( PiecewiseLinearConstraint *constraint ); @@ -137,7 +138,7 @@ class CDSmtCore void resetReportedViolations(); /* - Returns true iff the SMT core wants to perform a case split. + Returns true iff the Search Tree Handler wants to perform a case split. */ bool needToSplit() const; @@ -147,7 +148,7 @@ class CDSmtCore void pushDecision( PiecewiseLinearConstraint *constraint, PhaseStatus decision ); /* - Inform SmtCore of an implied (formerly valid) case split that was discovered. + Inform Search Tree Handler of an implied (formerly valid) case split that was discovered. */ void pushImplication( PiecewiseLinearConstraint *constraint ); @@ -196,7 +197,7 @@ class CDSmtCore unsigned getDecisionLevel() const; /* - Return a list of all splits performed so far, both SMT-originating and + Return a list of all splits performed so far, both Search Tree Handler-originating and valid ones, in the correct order. */ void allSplitsSoFar( List &result ) const; @@ -218,12 +219,12 @@ class CDSmtCore }; /* - Have the SMT core start reporting statistics. + Have the Search Tree Handler start reporting statistics. 
*/ void setStatistics( Statistics *statistics ); /* - Have the SMT core choose, among a set of violated PL constraints, which + Have the Search Tree Handler choose, among a set of violated PL constraints, which constraint should be repaired (without splitting) */ PiecewiseLinearConstraint *chooseViolatedConstraintForFixing( @@ -329,4 +330,4 @@ class CDSmtCore unsigned _numRejectedPhasePatternProposal; }; -#endif // __CDSmtCore_h__ +#endif // __CDSearchTreeHandler_h__ diff --git a/src/engine/CMakeLists.txt b/src/engine/CMakeLists.txt index 487d806d3c..6563c0517c 100644 --- a/src/engine/CMakeLists.txt +++ b/src/engine/CMakeLists.txt @@ -8,12 +8,12 @@ target_include_directories(${MARABOU_LIB} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}") target_sources(${MARABOU_TEST_LIB} PRIVATE ${SRCS}) target_include_directories(${MARABOU_TEST_LIB} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}") -set (ENGINE_TESTS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/tests") +set(ENGINE_TESTS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/tests") macro(engine_add_unit_test name) set(USE_MOCK_COMMON FALSE) set(USE_MOCK_ENGINE FALSE) marabou_add_test(${ENGINE_TESTS_DIR}/Test_${name} engine USE_MOCK_COMMON - USE_MOCK_ENGINE "unit") + USE_MOCK_ENGINE "unit") endmacro() engine_add_unit_test(AbsoluteValueConstraint) @@ -41,10 +41,10 @@ engine_add_unit_test(Query) engine_add_unit_test(ReluConstraint) engine_add_unit_test(RoundConstraint) engine_add_unit_test(RowBoundTightener) +engine_add_unit_test(SearchTreeHandler) engine_add_unit_test(SignConstraint) engine_add_unit_test(SigmoidConstraint) engine_add_unit_test(SoftmaxConstraint) -engine_add_unit_test(SmtCore) engine_add_unit_test(SumOfInfeasibilitiesManager) engine_add_unit_test(Tableau) engine_add_unit_test(BaBsrSplitting) diff --git a/src/engine/DnCWorker.cpp b/src/engine/DnCWorker.cpp index 4765eae5f1..367ebe1b32 100644 --- a/src/engine/DnCWorker.cpp +++ b/src/engine/DnCWorker.cpp @@ -85,9 +85,9 @@ void DnCWorker::popOneSubQueryAndSolve( bool restoreTreeStates ) String queryId = subQuery->_queryId; unsigned depth = subQuery->_depth; auto split = std::move( subQuery->_split ); - std::unique_ptr smtState = nullptr; - if ( restoreTreeStates && subQuery->_smtState ) - smtState = std::move( subQuery->_smtState ); + std::unique_ptr searchTreeState = nullptr; + if ( restoreTreeStates && subQuery->_searchTreeState ) + searchTreeState = std::move( subQuery->_searchTreeState ); unsigned timeoutInSeconds = subQuery->_timeoutInSeconds; // Reset the engine state @@ -102,8 +102,8 @@ void DnCWorker::popOneSubQueryAndSolve( bool restoreTreeStates ) _engine->applySnCSplit( *split, queryId ); bool fullSolveNeeded = true; // denotes whether we need to solve the subquery - if ( restoreTreeStates && smtState ) - fullSolveNeeded = _engine->restoreSmtState( *smtState ); + if ( restoreTreeStates && searchTreeState ) + fullSolveNeeded = _engine->restoreSearchTreeState( *searchTreeState ); IEngine::ExitCode result = IEngine::NOT_DONE; if ( fullSolveNeeded ) { @@ -136,14 +136,15 @@ void DnCWorker::popOneSubQueryAndSolve( bool restoreTreeStates ) ? 
0 : (unsigned)timeoutInSeconds * _timeoutFactor ); unsigned numNewSubQueries = pow( 2, _onlineDivides ); - std::vector> newSmtStates; + std::vector> newSearchTreeStates; if ( restoreTreeStates ) { - // create |numNewSubQueries| copies of the current SmtState + // create |numNewSubQueries| copies of the current SearchTreeState for ( unsigned i = 0; i < numNewSubQueries; ++i ) { - newSmtStates.push_back( std::unique_ptr( new SmtState() ) ); - _engine->storeSmtState( *( newSmtStates[i] ) ); + newSearchTreeStates.push_back( + std::unique_ptr( new SearchTreeState() ) ); + _engine->storeSearchTreeState( *( newSearchTreeStates[i] ) ); } } @@ -153,10 +154,10 @@ void DnCWorker::popOneSubQueryAndSolve( bool restoreTreeStates ) unsigned i = 0; for ( auto &newSubQuery : subQueries ) { - // Store the SmtCore state + // Store the SearchTreeHandler state if ( restoreTreeStates ) { - newSubQuery->_smtState = std::move( newSmtStates[i++] ); + newSubQuery->_searchTreeState = std::move( newSearchTreeStates[i++] ); } if ( !_workload->push( std::move( newSubQuery ) ) ) diff --git a/src/engine/Engine.cpp b/src/engine/Engine.cpp index 1802cd2d2a..353b8a9bb3 100644 --- a/src/engine/Engine.cpp +++ b/src/engine/Engine.cpp @@ -41,7 +41,7 @@ Engine::Engine() , _tableau( _boundManager ) , _preprocessedQuery( nullptr ) , _rowBoundTightener( *_tableau ) - , _smtCore( this ) + , _searchTreeHandler( this ) , _numPlConstraintsDisabledByValidSplits( 0 ) , _preprocessingEnabled( false ) , _initialStateStored( false ) @@ -73,7 +73,7 @@ Engine::Engine() , _groundBoundManager( _context ) , _UNSATCertificate( NULL ) { - _smtCore.setStatistics( &_statistics ); + _searchTreeHandler.setStatistics( &_statistics ); _tableau->setStatistics( &_statistics ); _rowBoundTightener->setStatistics( &_statistics ); _preprocessor.setStatistics( &_statistics ); @@ -137,7 +137,7 @@ void Engine::applySnCSplit( PiecewiseLinearCaseSplit sncSplit, String queryId ) _sncSplit = sncSplit; _queryId = queryId; preContextPushHook(); - _smtCore.pushContext(); + _searchTreeHandler.pushContext(); applySplit( sncSplit ); _boundManager.propagateTightenings(); } @@ -289,7 +289,7 @@ bool Engine::solve( double timeoutInSeconds ) } } - // If true, we just entered a new subproblem + // If true, we just entered a new sub-problem if ( splitJustPerformed ) { performBoundTighteningAfterCaseSplit(); @@ -297,10 +297,9 @@ bool Engine::solve( double timeoutInSeconds ) splitJustPerformed = false; } - // Perform any SmtCore-initiated case splits - if ( _smtCore.needToSplit() ) + if ( _searchTreeHandler.needToSplit() ) { - _smtCore.performSplit(); + _searchTreeHandler.performSplit(); splitJustPerformed = true; continue; } @@ -363,8 +362,8 @@ bool Engine::solve( double timeoutInSeconds ) } else { - while ( !_smtCore.needToSplit() ) - _smtCore.reportRejectedPhasePatternProposal(); + while ( !_searchTreeHandler.needToSplit() ) + _searchTreeHandler.reportRejectedPhasePatternProposal(); continue; } } @@ -409,7 +408,7 @@ bool Engine::solve( double timeoutInSeconds ) if ( _produceUNSATProofs ) explainSimplexFailure(); - if ( !_smtCore.popSplit() ) + if ( !_searchTreeHandler.popSplit() ) { mainLoopEnd = TimeUtils::sampleMicro(); _statistics.incLongAttribute( Statistics::TIME_MAIN_LOOP_MICRO, @@ -467,7 +466,7 @@ void Engine::mainLoopStatistics() _statistics.setUnsignedAttribute( Statistics::NUM_ACTIVE_PL_CONSTRAINTS, activeConstraints ); _statistics.setUnsignedAttribute( Statistics::NUM_PL_VALID_SPLITS, _numPlConstraintsDisabledByValidSplits ); - _statistics.setUnsignedAttribute( 
Statistics::NUM_PL_SMT_ORIGINATED_SPLITS, + _statistics.setUnsignedAttribute( Statistics::NUM_PL_SEARCH_TREE_ORIGINATED_SPLITS, _plConstraints.size() - activeConstraints - _numPlConstraintsDisabledByValidSplits ); @@ -565,7 +564,7 @@ bool Engine::performPrecisionRestorationIfNeeded() // Restoration is not required _basisRestorationPerformed = Engine::NO_RESTORATION_PERFORMED; - // Possible restoration due to preceision degradation + // Possible restoration due to precision degradation if ( shouldCheckDegradation() && highDegradation() ) { performPrecisionRestoration( PrecisionRestorer::RESTORE_BASICS ); @@ -613,7 +612,7 @@ void Engine::performConstraintFixingStep() // Select a violated constraint as the target selectViolatedPlConstraint(); - // Report the violated constraint to the SMT engine + // Report the violated constraint to the Search Tree engine reportPlViolation(); // Attempt to fix the constraint @@ -1114,7 +1113,7 @@ void Engine::selectInitialVariablesForBasis( const double *constraintMatrix, /* This method permutes rows and columns in the constraint matrix (prior to the addition of auxiliary variables), in order to obtain a set of - column that constitue a lower triangular matrix. The variables + column that constitute a lower triangular matrix. The variables corresponding to the columns of this matrix join the initial basis. (It is possible that not enough variables are obtained this way, in which @@ -1339,7 +1338,7 @@ void Engine::initializeTableau( const double *constraintMatrix, const ListsetConstraintMatrix( constraintMatrix ); _tableau->registerToWatchAllVariables( _rowBoundTightener ); @@ -1512,7 +1511,11 @@ bool Engine::processInputQuery( const IQuery &inputQuery, bool preprocess ) } for ( const auto &constraint : _plConstraints ) + { constraint->registerTableau( _tableau ); + if ( !Options::get()->getBool( Options::DNC_MODE ) ) + constraint->initializeCDOs( &_context ); + } for ( const auto &constraint : _nlConstraints ) constraint->registerTableau( _tableau ); @@ -1563,7 +1566,7 @@ bool Engine::processInputQuery( const IQuery &inputQuery, bool preprocess ) } } ); - _smtCore.storeDebuggingSolution( _preprocessedQuery->_debuggingSolution ); + _searchTreeHandler.storeDebuggingSolution( _preprocessedQuery->_debuggingSolution ); return true; } @@ -1571,7 +1574,7 @@ void Engine::performMILPSolverBoundedTightening( Query *inputQuery ) { if ( _networkLevelReasoner && Options::get()->gurobiEnabled() ) { - // Obtain from and store bounds into inputquery if it is not null. + // Obtain from and store bounds into inputQuery if it is not null. 
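The new initializeCDOs registration above ties each constraint's mutable state (phase, activity) to the solver's central context, so a context pop rolls that state back automatically instead of restoring it from stored EngineState copies. A small sketch of the mechanism, assuming the vendored CVC4 context API that Marabou already links against:

    #include "context/cdo.h"
    #include "context/context.h"

    // Illustrative only: a context-dependent object reverts on pop, which is
    // what lets storeState/restoreState skip constraints that own a context.
    void cdoRollbackExample( CVC4::context::Context &ctx )
    {
        CVC4::context::CDO<int> phase( &ctx, 0 ); // registered at the current level
        ctx.push();
        phase = 1; // modified inside the new decision level
        ctx.pop(); // phase reverts to 0 automatically
    }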
if ( inputQuery ) _networkLevelReasoner->obtainCurrentBounds( *inputQuery ); else @@ -1843,14 +1846,15 @@ void Engine::selectViolatedPlConstraint() { ASSERT( !_violatedPlConstraints.empty() ); - _plConstraintToFix = _smtCore.chooseViolatedConstraintForFixing( _violatedPlConstraints ); + _plConstraintToFix = + _searchTreeHandler.chooseViolatedConstraintForFixing( _violatedPlConstraints ); ASSERT( _plConstraintToFix ); } void Engine::reportPlViolation() { - _smtCore.reportViolatedConstraint( _plConstraintToFix ); + _searchTreeHandler.reportViolatedConstraint( _plConstraintToFix ); } void Engine::storeState( EngineState &state, TableauStateStorageLevel level ) const @@ -1859,7 +1863,8 @@ void Engine::storeState( EngineState &state, TableauStateStorageLevel level ) co state._tableauStateStorageLevel = level; for ( const auto &constraint : _plConstraints ) - state._plConstraintToState[constraint] = constraint->duplicateConstraint(); + if ( !constraint->getContext() ) + state._plConstraintToState[constraint] = constraint->duplicateConstraint(); state._numPlConstraintsDisabledByValidSplits = _numPlConstraintsDisabledByValidSplits; } @@ -1877,10 +1882,11 @@ void Engine::restoreState( const EngineState &state ) ENGINE_LOG( "\tRestoring constraint states" ); for ( auto &constraint : _plConstraints ) { - if ( !state._plConstraintToState.exists( constraint ) ) + if ( !state._plConstraintToState.exists( constraint ) && !constraint->getContext() ) throw MarabouError( MarabouError::MISSING_PL_CONSTRAINT_STATE ); - constraint->restoreState( state._plConstraintToState[constraint] ); + if ( !constraint->getContext() ) + constraint->restoreState( state._plConstraintToState[constraint] ); } _numPlConstraintsDisabledByValidSplits = state._numPlConstraintsDisabledByValidSplits; @@ -1894,8 +1900,8 @@ void Engine::restoreState( const EngineState &state ) _costFunctionManager->initialize(); } - // Reset the violation counts in the SMT core - _smtCore.resetSplitConditions(); + // Reset the violation counts in the Search Tree handler + _searchTreeHandler.resetSplitConditions(); } void Engine::setNumPlConstraintsDisabledByValidSplits( unsigned numConstraints ) @@ -2212,7 +2218,7 @@ bool Engine::applyValidConstraintCaseSplit( PiecewiseLinearConstraint *constrain constraint->setActiveConstraint( false ); PiecewiseLinearCaseSplit validSplit = constraint->getValidCaseSplit(); - _smtCore.recordImpliedValidSplit( validSplit ); + _searchTreeHandler.recordImpliedValidSplit( validSplit ); applySplit( validSplit ); if ( _soiManager ) @@ -2261,7 +2267,7 @@ void Engine::tightenBoundsOnConstraintMatrix() struct timespec start = TimeUtils::sampleMicro(); if ( _statistics.getLongAttribute( Statistics::NUM_MAIN_LOOP_ITERATIONS ) % - GlobalConfiguration::BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY == + GlobalConfiguration::BOUND_TIGHTENING_ON_CONSTRAINT_MATRIX_FREQUENCY == 0 ) { _rowBoundTightener->examineConstraintMatrix( true ); @@ -2308,7 +2314,7 @@ void Engine::performPrecisionRestoration( PrecisionRestorer::RestoreBasics resto double before = _degradationChecker.computeDegradation( *_tableau ); // - _precisionRestorer.restorePrecision( *this, *_tableau, _smtCore, restoreBasics ); + _precisionRestorer.restorePrecision( *this, *_tableau, _searchTreeHandler, restoreBasics ); struct timespec end = TimeUtils::sampleMicro(); _statistics.incLongAttribute( Statistics::TOTAL_TIME_PRECISION_RESTORATION, TimeUtils::timePassed( start, end ) ); @@ -2329,7 +2335,7 @@ void Engine::performPrecisionRestoration( PrecisionRestorer::RestoreBasics 
resto // Try again! start = TimeUtils::sampleMicro(); _precisionRestorer.restorePrecision( - *this, *_tableau, _smtCore, PrecisionRestorer::DO_NOT_RESTORE_BASICS ); + *this, *_tableau, _searchTreeHandler, PrecisionRestorer::DO_NOT_RESTORE_BASICS ); end = TimeUtils::sampleMicro(); _statistics.incLongAttribute( Statistics::TOTAL_TIME_PRECISION_RESTORATION, TimeUtils::timePassed( start, end ) ); @@ -2375,7 +2381,7 @@ Query *Engine::getQuery() void Engine::checkBoundCompliancyWithDebugSolution() { - if ( _smtCore.checkSkewFromDebuggingSolution() ) + if ( _searchTreeHandler.checkSkewFromDebuggingSolution() ) { // The stack is compliant, we should not have learned any non-compliant bounds for ( const auto &var : _preprocessedQuery->_debuggingSolution ) @@ -2470,7 +2476,7 @@ unsigned Engine::performSymbolicBoundTightening( Query *inputQuery ) // Step 1: tell the NLR about the current bounds if ( inputQuery ) { - // Obtain from and store bounds into inputquery if it is not null. + // Obtain from and store bounds into inputQuery if it is not null. _networkLevelReasoner->obtainCurrentBounds( *inputQuery ); } else @@ -2581,16 +2587,15 @@ void Engine::reset() resetStatistics(); _sncMode = false; clearViolatedPLConstraints(); - resetSmtCore(); + resetSearchTreeHandler(); resetBoundTighteners(); - resetExitCode(); } void Engine::resetStatistics() { Statistics statistics; _statistics = statistics; - _smtCore.setStatistics( &_statistics ); + _searchTreeHandler.setStatistics( &_statistics ); _tableau->setStatistics( &_statistics ); _rowBoundTightener->setStatistics( &_statistics ); _preprocessor.setStatistics( &_statistics ); @@ -2605,10 +2610,10 @@ void Engine::clearViolatedPLConstraints() _plConstraintToFix = NULL; } -void Engine::resetSmtCore() +void Engine::resetSearchTreeHandler() { - _smtCore.reset(); - _smtCore.initializeScoreTrackerIfNeeded( _plConstraints ); + _searchTreeHandler.reset(); + _searchTreeHandler.initializeScoreTrackerIfNeeded( _plConstraints ); } void Engine::resetExitCode() @@ -2720,9 +2725,9 @@ void Engine::decideBranchingHeuristics() _preprocessedQuery->getInputVariables().size() < GlobalConfiguration::INTERVAL_SPLITTING_THRESHOLD ) { - // NOTE: the benefit of input splitting is minimal with abstract intepretation disabled. - // Therefore, since the proof production mode does not currently support that, we do - // not perform input-splitting in proof production mode. + // NOTE: the benefit of input splitting is minimal with abstract interpretation + // disabled. Therefore, since the proof production mode does not currently support that, + // we do not perform input-splitting in proof production mode. 
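Condensed, the selection rule that decideBranchingHeuristics implements here: low-dimensional inputs favor interval splitting (except under proof production, per the note above), and otherwise the engine falls back to pseudo-impact or ReLU-violation branching. A sketch under assumed constant values; the enum and function names are illustrative:

    enum class Strategy { LargestInterval, PseudoImpact, ReLUViolation };

    Strategy chooseStrategy( unsigned numInputs, bool produceProofs, bool deepSoI )
    {
        const unsigned intervalSplittingThreshold = 10; // INTERVAL_SPLITTING_THRESHOLD
        if ( !produceProofs && numInputs > 0 && numInputs < intervalSplittingThreshold )
            return Strategy::LargestInterval;
        return deepSoI ? Strategy::PseudoImpact : Strategy::ReLUViolation;
    }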
divideStrategy = DivideStrategy::LargestInterval; if ( _verbosity >= 2 ) printf( "Branching heuristics set to LargestInterval\n" ); @@ -2744,8 +2749,8 @@ void Engine::decideBranchingHeuristics() } } ASSERT( divideStrategy != DivideStrategy::Auto ); - _smtCore.setBranchingHeuristics( divideStrategy ); - _smtCore.initializeScoreTrackerIfNeeded( _plConstraints ); + _searchTreeHandler.setBranchingHeuristics( divideStrategy ); + _searchTreeHandler.initializeScoreTrackerIfNeeded( _plConstraints ); } PiecewiseLinearConstraint *Engine::pickSplitPLConstraintBasedOnBaBsrHeuristic() @@ -2917,8 +2922,8 @@ PiecewiseLinearConstraint *Engine::pickSplitPLConstraint( DivideStrategy strateg PiecewiseLinearConstraint *candidatePLConstraint = NULL; if ( strategy == DivideStrategy::PseudoImpact ) { - if ( _smtCore.getStackDepth() > 3 ) - candidatePLConstraint = _smtCore.getConstraintsWithHighestScore(); + if ( _context.getLevel() > 3 ) + candidatePLConstraint = _searchTreeHandler.getConstraintsWithHighestScore(); else if ( !_preprocessedQuery->getInputVariables().empty() && _preprocessedQuery->getInputVariables().size() < GlobalConfiguration::INTERVAL_SPLITTING_THRESHOLD ) @@ -2927,7 +2932,7 @@ PiecewiseLinearConstraint *Engine::pickSplitPLConstraint( DivideStrategy strateg { candidatePLConstraint = pickSplitPLConstraintBasedOnPolarity(); if ( candidatePLConstraint == NULL ) - candidatePLConstraint = _smtCore.getConstraintsWithHighestScore(); + candidatePLConstraint = _searchTreeHandler.getConstraintsWithHighestScore(); } } else if ( strategy == DivideStrategy::BaBSR ) @@ -2937,8 +2942,7 @@ PiecewiseLinearConstraint *Engine::pickSplitPLConstraint( DivideStrategy strateg else if ( strategy == DivideStrategy::EarliestReLU ) candidatePLConstraint = pickSplitPLConstraintBasedOnTopology(); else if ( strategy == DivideStrategy::LargestInterval && - ( ( _smtCore.getStackDepth() + 1 ) % - GlobalConfiguration::INTERVAL_SPLITTING_FREQUENCY != + ( ( _context.getLevel() + 1 ) % GlobalConfiguration::INTERVAL_SPLITTING_FREQUENCY != 0 ) ) { // Conduct interval splitting periodically. @@ -2967,17 +2971,17 @@ PiecewiseLinearConstraint *Engine::pickSplitPLConstraintSnC( SnCDivideStrategy s return candidatePLConstraint; } -bool Engine::restoreSmtState( SmtState &smtState ) +bool Engine::restoreSearchTreeState( SearchTreeState &searchTreeState ) { try { - ASSERT( _smtCore.getStackDepth() == 0 ); + ASSERT( _searchTreeHandler.getStackDepth() == 0 ); // Step 1: all implied valid splits at root - for ( auto &validSplit : smtState._impliedValidSplitsAtRoot ) + for ( auto &validSplit : searchTreeState._impliedValidSplitsAtRoot ) { applySplit( validSplit ); - _smtCore.recordImpliedValidSplit( validSplit ); + _searchTreeHandler.recordImpliedValidSplit( validSplit ); } tightenBoundsOnConstraintMatrix(); @@ -2989,11 +2993,11 @@ bool Engine::restoreSmtState( SmtState &smtState ) while ( applyAllValidConstraintCaseSplits() ); // Step 2: replay the stack - for ( auto &stackEntry : smtState._stack ) + for ( auto &stackEntry : searchTreeState._stack ) { - _smtCore.replaySmtStackEntry( stackEntry ); + _searchTreeHandler.replaySearchTreeStackEntry( stackEntry ); // Do all the bound propagation, and set ReLU constraints to inactive (at - // least the one corresponding to the _activeSplit applied above. + // least the one corresponding to the _activeSplit applied above). 
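The replay performed by restoreSearchTreeState, reduced to its skeleton: root-level valid splits are re-applied first, then each stored stack entry is replayed with bound propagation in between, so implied splits fire before the next entry. Engine operations are stubbed here; only the ordering is taken from the code around this hunk:

    #include <vector>

    struct ReplaySketch
    {
        void applySplit( int /* split */ ) {}
        void replayStackEntry( int /* entry */ ) {}
        void propagateBounds() {}

        bool restore( const std::vector<int> &rootSplits, const std::vector<int> &stack )
        {
            for ( int split : rootSplits )
                applySplit( split );       // implied valid splits at the root
            propagateBounds();
            for ( int entry : stack )
            {
                replayStackEntry( entry ); // re-apply one stored decision
                propagateBounds();         // let implied splits fire before the next
            }
            return true; // the real engine may instead conclude UNSAT mid-replay
        }
    };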
tightenBoundsOnConstraintMatrix(); // For debugging purposes @@ -3011,7 +3015,7 @@ bool Engine::restoreSmtState( SmtState &smtState ) if ( _produceUNSATProofs ) explainSimplexFailure(); - if ( !_smtCore.popSplit() ) + if ( !_searchTreeHandler.popSplit() ) { if ( _verbosity > 0 ) { @@ -3027,9 +3031,9 @@ bool Engine::restoreSmtState( SmtState &smtState ) return true; } -void Engine::storeSmtState( SmtState &smtState ) +void Engine::storeSearchTreeState( SearchTreeState &searchTreeState ) { - _smtCore.storeSmtState( smtState ); + _searchTreeHandler.storeSearchTreeState( searchTreeState ); } bool Engine::solveWithMILPEncoding( double timeoutInSeconds ) @@ -3116,8 +3120,8 @@ bool Engine::performDeepSoILocalSearch() if ( initialPhasePattern.isZero() ) { if ( hasBranchingCandidate() ) - while ( !_smtCore.needToSplit() ) - _smtCore.reportRejectedPhasePatternProposal(); + while ( !_searchTreeHandler.needToSplit() ) + _searchTreeHandler.reportRejectedPhasePatternProposal(); return false; } @@ -3131,7 +3135,7 @@ bool Engine::performDeepSoILocalSearch() double costOfProposedPhasePattern = FloatUtils::infinity(); bool lastProposalAccepted = true; - while ( !_smtCore.needToSplit() ) + while ( !_searchTreeHandler.needToSplit() ) { struct timespec end = TimeUtils::sampleMicro(); _statistics.incLongAttribute( Statistics::TOTAL_TIME_LOCAL_SEARCH_MICRO, @@ -3180,8 +3184,8 @@ bool Engine::performDeepSoILocalSearch() // the SoI with the hope to branch on them early. bumpUpPseudoImpactOfPLConstraintsNotInSoI(); if ( hasBranchingCandidate() ) - while ( !_smtCore.needToSplit() ) - _smtCore.reportRejectedPhasePatternProposal(); + while ( !_searchTreeHandler.needToSplit() ) + _searchTreeHandler.reportRejectedPhasePatternProposal(); return false; } } @@ -3210,7 +3214,7 @@ bool Engine::performDeepSoILocalSearch() } else { - _smtCore.reportRejectedPhasePatternProposal(); + _searchTreeHandler.reportRejectedPhasePatternProposal(); lastProposalAccepted = false; } } @@ -3289,7 +3293,7 @@ void Engine::updatePseudoImpactWithSoICosts( double costOfLastAcceptedPhasePatte ASSERT( constraintsUpdated.size() > 0 ); // Update the Pseudo-Impact estimation. 
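The score each constraint receives is, in essence, a running estimate of how much its phase changes moved the SoI cost. A one-line sketch of such an update as an exponential moving average; the smoothing factor is an assumption for illustration, not a value taken from this patch:

    // Blend the previously tracked score with the newly observed impact.
    double updatePseudoImpact( double oldScore, double observedImpact )
    {
        const double alpha = 0.5; // assumed smoothing factor
        return ( 1.0 - alpha ) * oldScore + alpha * observedImpact;
    }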
for ( const auto &constraint : constraintsUpdated ) - _smtCore.updatePLConstraintScore( constraint, score ); + _searchTreeHandler.updatePLConstraintScore( constraint, score ); } void Engine::bumpUpPseudoImpactOfPLConstraintsNotInSoI() @@ -3299,7 +3303,7 @@ void Engine::bumpUpPseudoImpactOfPLConstraintsNotInSoI() { if ( plConstraint->isActive() && !plConstraint->supportSoI() && !plConstraint->phaseFixed() && !plConstraint->satisfied() ) - _smtCore.updatePLConstraintScore( + _searchTreeHandler.updatePLConstraintScore( plConstraint, GlobalConfiguration::SCORE_BUMP_FOR_PL_CONSTRAINTS_NOT_IN_SOI ); } } @@ -3435,7 +3439,7 @@ bool Engine::shouldProduceProofs() const void Engine::explainSimplexFailure() { - ASSERT( _produceUNSATProofs ); + ASSERT( _produceUNSATProofs && _lpSolverType == LPSolverType::NATIVE ); DEBUG( checkGroundBounds() ); @@ -3463,9 +3467,12 @@ void Engine::explainSimplexFailure() ASSERT( infeasibleVar < _tableau->getN() ); ASSERT( _UNSATCertificateCurrentPointer && !( **_UNSATCertificateCurrentPointer ).getContradiction() ); + _statistics.incUnsignedAttribute( Statistics::NUM_CERTIFIED_LEAVES ); - writeContradictionToCertificate( infeasibleVar ); + Vector leafContradictionVec = computeContradiction( infeasibleVar ); + + writeContradictionToCertificate( leafContradictionVec, infeasibleVar ); ( **_UNSATCertificateCurrentPointer ).makeLeaf(); } @@ -3719,7 +3726,7 @@ const UnsatCertificateNode *Engine::getUNSATCertificateRoot() const bool Engine::certifyUNSATCertificate() { - ASSERT( _produceUNSATProofs && _UNSATCertificate && !_smtCore.getStackDepth() ); + ASSERT( _produceUNSATProofs && _UNSATCertificate && !_searchTreeHandler.getStackDepth() ); for ( auto &constraint : _plConstraints ) { @@ -3733,6 +3740,7 @@ bool Engine::certifyUNSATCertificate() } } + _UNSATCertificateCurrentPointer->get()->deleteUnusedLemmas(); struct timespec certificationStart = TimeUtils::sampleMicro(); _precisionRestorer.restoreInitialEngineState( *this ); @@ -3804,7 +3812,7 @@ void Engine::markLeafToDelegate() // Mark leaf with toDelegate Flag UnsatCertificateNode *currentUnsatCertificateNode = _UNSATCertificateCurrentPointer->get(); ASSERT( _UNSATCertificateCurrentPointer && !currentUnsatCertificateNode->getContradiction() ); - currentUnsatCertificateNode->setDelegationStatus( DelegationStatus::DELEGATE_SAVE ); + currentUnsatCertificateNode->setDelegationStatus( DelegationStatus::DELEGATE_DONT_SAVE ); currentUnsatCertificateNode->deletePLCExplanations(); _statistics.incUnsignedAttribute( Statistics::NUM_DELEGATED_LEAVES ); @@ -3842,15 +3850,13 @@ const Vector Engine::computeContradiction( unsigned infeasibleVar ) cons return contradiction; } -void Engine::writeContradictionToCertificate( unsigned infeasibleVar ) const +void Engine::writeContradictionToCertificate( const Vector &contradiction, + unsigned infeasibleVar ) const { ASSERT( _produceUNSATProofs ); - Vector leafContradictionVec = computeContradiction( infeasibleVar ); - - Contradiction *leafContradiction = leafContradictionVec.empty() - ? new Contradiction( infeasibleVar ) - : new Contradiction( leafContradictionVec ); + Contradiction *leafContradiction = contradiction.empty() ? 
new Contradiction( infeasibleVar ) + : new Contradiction( contradiction ); ( **_UNSATCertificateCurrentPointer ).setContradiction( leafContradiction ); } @@ -3911,12 +3917,17 @@ void Engine::extractBounds( IQuery &inputQuery ) } } -void Engine::addPLCLemma( std::shared_ptr &explanation ) +void Engine::incNumOfLemmas() { if ( !_produceUNSATProofs ) return; - ASSERT( explanation && _UNSATCertificate && _UNSATCertificateCurrentPointer ) + ASSERT( _UNSATCertificate && _UNSATCertificateCurrentPointer ) _statistics.incUnsignedAttribute( Statistics::NUM_LEMMAS ); - _UNSATCertificateCurrentPointer->get()->addPLCLemma( explanation ); + _statistics.incUnsignedAttribute( Statistics::NUM_LEMMAS_USED ); } + +const List *Engine::getPiecewiseLinearConstraints() const +{ + return &_plConstraints; +} \ No newline at end of file diff --git a/src/engine/Engine.h b/src/engine/Engine.h index a3ea1c22d3..6a38994d05 100644 --- a/src/engine/Engine.h +++ b/src/engine/Engine.h @@ -39,8 +39,8 @@ #include "PrecisionRestorer.h" #include "Preprocessor.h" #include "Query.h" +#include "SearchTreeHandler.h" #include "SignalHandler.h" -#include "SmtCore.h" #include "SmtLibWriter.h" #include "SnCDivideStrategy.h" #include "SparseUnsortedList.h" @@ -83,7 +83,7 @@ class Engine Attempt to find a feasible solution for the input within a time limit (a timeout of 0 means no time limit). Returns true if found, false if infeasible. */ - bool solve( double timeoutInSeconds = 0 ); + bool solve( double timeoutInSeconds = 0 ) override; /* Minimize the cost function with respect to the current set of linear constraints. @@ -125,9 +125,9 @@ class Engine /* Methods for storing and restoring the state of the engine. */ - void storeState( EngineState &state, TableauStateStorageLevel level ) const; - void restoreState( const EngineState &state ); - void setNumPlConstraintsDisabledByValidSplits( unsigned numConstraints ); + void storeState( EngineState &state, TableauStateStorageLevel level ) const override; + void restoreState( const EngineState &state ) override; + void setNumPlConstraintsDisabledByValidSplits( unsigned numConstraints ) override; /* Preprocessor access. @@ -138,7 +138,7 @@ class Engine /* A request from the user to terminate */ - void quitSignal(); + void quitSignal() override; const Statistics *getStatistics() const; @@ -149,7 +149,7 @@ class Engine /* Get the exit code */ - Engine::ExitCode getExitCode() const; + Engine::ExitCode getExitCode() const override; /* Get the quitRequested flag @@ -159,24 +159,24 @@ class Engine /* Get the list of input variables */ - List getInputVariables() const; + List getInputVariables() const override; /* Add equations and tightenings from a split. */ - void applySplit( const PiecewiseLinearCaseSplit &split ); + void applySplit( const PiecewiseLinearCaseSplit &split ) override; /* Hooks invoked before/after context push/pop to store/restore/update context independent data. */ - void postContextPopHook(); - void preContextPushHook(); + void postContextPopHook() override; + void preContextPushHook() override; /* Reset the state of the engine, before solving a new query (as part of DnC mode). */ - void reset(); + void reset() override; /* Reset the statistics object @@ -194,41 +194,41 @@ class Engine void setVerbosity( unsigned verbosity ); /* - Apply the stack to the newly created SmtCore, returns false if UNSAT is + Apply the stack to the newly created SearchTreeHandler, returns false if UNSAT is found in this process. 
*/ - bool restoreSmtState( SmtState &smtState ); + bool restoreSearchTreeState( SearchTreeState &searchTreeState ) override; /* - Store the current stack of the smtCore into smtState + Store the current stack of the searchTreeHandler into searchTreeState */ - void storeSmtState( SmtState &smtState ); + void storeSearchTreeState( SearchTreeState &searchTreeState ) override; /* Pick the piecewise linear constraint for splitting */ - PiecewiseLinearConstraint *pickSplitPLConstraint( DivideStrategy strategy ); + PiecewiseLinearConstraint *pickSplitPLConstraint( DivideStrategy strategy ) override; /* Call-back from QueryDividers Pick the piecewise linear constraint for splitting */ - PiecewiseLinearConstraint *pickSplitPLConstraintSnC( SnCDivideStrategy strategy ); + PiecewiseLinearConstraint *pickSplitPLConstraintSnC( SnCDivideStrategy strategy ) override; /* PSA: The following two methods are for DnC only and should be used very cautiously. */ - void resetSmtCore(); void resetExitCode(); + void resetSearchTreeHandler(); void resetBoundTighteners(); /* Register initial split when in SnC mode */ - void applySnCSplit( PiecewiseLinearCaseSplit sncSplit, String queryId ); + void applySnCSplit( PiecewiseLinearCaseSplit sncSplit, String queryId ) override; - bool inSnCMode() const; + bool inSnCMode() const override; /* Apply bound tightenings stored in the bound manager. @@ -239,71 +239,76 @@ class Engine Apply all bound tightenings (row and matrix-based) in the queue. */ - void applyAllBoundTightenings(); + void applyAllBoundTightenings() override; /* Apply all valid case splits proposed by the constraints. Return true if a valid case split has been applied. */ - bool applyAllValidConstraintCaseSplits(); + bool applyAllValidConstraintCaseSplits() override; void setRandomSeed( unsigned seed ); /* Returns true iff the engine is in proof production mode */ - bool shouldProduceProofs() const; + bool shouldProduceProofs() const override; /* Update the ground bounds */ - void updateGroundUpperBound( unsigned var, double value ); - void updateGroundLowerBound( unsigned var, double value ); + void updateGroundUpperBound( unsigned var, double value ) override; + void updateGroundLowerBound( unsigned var, double value ) override; /* Return all ground bounds as a vector */ - double getGroundBound( unsigned var, bool isUpper ) const; + double getGroundBound( unsigned var, bool isUpper ) const override; /* Get the current pointer of the UNSAT certificate */ - UnsatCertificateNode *getUNSATCertificateCurrentPointer() const; + UnsatCertificateNode *getUNSATCertificateCurrentPointer() const override; /* Set the current pointer of the UNSAT certificate */ - void setUNSATCertificateCurrentPointer( UnsatCertificateNode *node ); + void setUNSATCertificateCurrentPointer( UnsatCertificateNode *node ) override; /* Get the pointer to the root of the UNSAT certificate */ - const UnsatCertificateNode *getUNSATCertificateRoot() const; + const UnsatCertificateNode *getUNSATCertificateRoot() const override; /* Certify the UNSAT certificate */ - bool certifyUNSATCertificate(); + bool certifyUNSATCertificate() override; /* Get the boundExplainer */ - const BoundExplainer *getBoundExplainer() const; + const BoundExplainer *getBoundExplainer() const override; /* Set the boundExplainer */ - void setBoundExplainerContent( BoundExplainer *boundExplainer ); + void setBoundExplainerContent( BoundExplainer *boundExplainer ) override; /* Propagate bound tightenings stored in the BoundManager */ - void propagateBoundManagerTightenings(); 
+    void propagateBoundManagerTightenings() override;

    /*
      Add lemma to the UNSAT Certificate
    */
-    void addPLCLemma( std::shared_ptr<PLCLemma> &explanation );
+    void incNumOfLemmas() override;
+
+    /*
+      For debugging purposes
+    */
+    const List<PiecewiseLinearConstraint *> *getPiecewiseLinearConstraints() const override;

private:
    enum BasisRestorationRequired {
@@ -327,7 +332,7 @@ class Engine

    /*
      Context is the central object that manages memory and back-tracking
-      across context-dependent components - SMTCore,
+      across context-dependent components - SearchTreeHandler,
      PiecewiseLinearConstraints, BoundManager, etc.
    */
    Context _context;
@@ -387,9 +392,9 @@ class Engine
    AutoRowBoundTightener _rowBoundTightener;

    /*
-      The SMT engine is in charge of case splitting.
+      The Search Tree engine is in charge of case splitting.
    */
-    SmtCore _smtCore;
+    SearchTreeHandler _searchTreeHandler;

    /*
      Number of pl constraints disabled by valid splits.
@@ -592,7 +597,7 @@ class Engine
    void selectViolatedPlConstraint();

    /*
-      Report the violated PL constraint to the SMT engine.
+      Report the violated PL constraint to the Search Tree engine.
    */
    void reportPlViolation();
@@ -817,15 +822,15 @@ class Engine
    /*
      Get Context reference
    */
-    Context &getContext()
+    Context &getContext() override
    {
        return _context;
    }

    /*
-      Checks whether the current bounds are consistent. Exposed for the SmtCore.
+      Checks whether the current bounds are consistent. Exposed for the SearchTreeHandler.
    */
-    bool consistentBounds() const;
+    bool consistentBounds() const override;

    /*
      DEBUG only
@@ -834,7 +839,7 @@ class Engine
    void checkGurobiBoundConsistency() const;

    /*
-      Proof Production data structes
+      Proof Production data structures
    */
    bool _produceUNSATProofs;
@@ -850,7 +855,7 @@ class Engine
    /*
      Returns the value of a variable bound, as explained by the BoundExplainer
    */
-    double explainBound( unsigned var, bool isUpper ) const;
+    double explainBound( unsigned var, bool isUpper ) const override;

    /*
      Returns true iff both bounds are epsilon close to their explained bounds
@@ -865,7 +870,7 @@ class Engine
    /*
      Finds the variable causing failure and updates its bounds explanations
    */
-    void explainSimplexFailure();
+    void explainSimplexFailure() override;

    /*
      Sanity check for ground bounds, returns true iff all bounds are at least as tight as their
@@ -904,7 +909,8 @@ class Engine
    /*
      Writes the details of a contradiction to the UNSAT certificate node
    */
-    void writeContradictionToCertificate( unsigned infeasibleVar ) const;
+    void writeContradictionToCertificate( const Vector<double> &contradiction,
+                                          unsigned infeasibleVar ) const;
};

#endif // __Engine_h__
diff --git a/src/engine/EngineState.h b/src/engine/EngineState.h
index 3e174bac02..b423173f12 100644
--- a/src/engine/EngineState.h
+++ b/src/engine/EngineState.h
@@ -42,7 +42,7 @@ class EngineState
    /*
      A unique ID allocated to every state that is stored, for
-      debugging purposes. These are assigned by the SMT core.
+      debugging purposes. These are assigned by the Search Tree handler.
*/ unsigned _stateId; }; diff --git a/src/engine/IBoundManager.h b/src/engine/IBoundManager.h index f2aad70ce5..8731261b26 100644 --- a/src/engine/IBoundManager.h +++ b/src/engine/IBoundManager.h @@ -27,7 +27,6 @@ #define __IBoundManager_h__ #include "List.h" -#include "PiecewiseLinearFunctionType.h" #include "Tightening.h" #include "Vector.h" @@ -38,6 +37,7 @@ class SparseUnsortedList; class TableauRow; class ITableau; class IRowBoundTightener; +class PiecewiseLinearConstraint; class IBoundManager { public: @@ -127,13 +127,14 @@ class IBoundManager Add a lemma to the UNSATCertificateNode object Return true iff adding the lemma was successful */ - virtual bool - addLemmaExplanationAndTightenBound( unsigned var, - double value, - Tightening::BoundType affectedVarBound, - const List &causingVars, - Tightening::BoundType causingVarBound, - PiecewiseLinearFunctionType constraintType ) = 0; + virtual bool addLemmaExplanationAndTightenBound( unsigned var, + double value, + Tightening::BoundType affectedVarBound, + const List &causingVars, + Tightening::BoundType causingVarBound, + PiecewiseLinearConstraint &constraint, + bool isPhaseFixing = false, + double minTargetBound = 0 ) = 0; /* Return the content of the object containing all explanations for variable bounds in the diff --git a/src/engine/IEngine.h b/src/engine/IEngine.h index ea592bb4e5..a97f6c316a 100644 --- a/src/engine/IEngine.h +++ b/src/engine/IEngine.h @@ -32,10 +32,10 @@ class EngineState; class Equation; class PiecewiseLinearCaseSplit; -class PLCLemma; -class SmtState; +class SearchTreeState; class String; class PiecewiseLinearConstraint; +class PLCLemma; class UnsatCertificateNode; class IEngine @@ -79,15 +79,15 @@ class IEngine virtual void setNumPlConstraintsDisabledByValidSplits( unsigned numConstraints ) = 0; /* - Store the current stack of the smtCore into smtState + Store the current stack of the searchTreeHandler into searchTreeState */ - virtual void storeSmtState( SmtState &smtState ) = 0; + virtual void storeSearchTreeState( SearchTreeState &searchTreeState ) = 0; /* - Apply the stack to the newly created SmtCore, returns false if UNSAT is + Apply the stack to the newly created SearchTreeHandler, returns false if UNSAT is found in this process. */ - virtual bool restoreSmtState( SmtState &smtState ) = 0; + virtual bool restoreSearchTreeState( SearchTreeState &searchTreeState ) = 0; /* Solve the encoded query. 
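For orientation, a minimal sketch of how the storeSearchTreeState/restoreSearchTreeState pair declared above is intended to be used, e.g. when a search is handed over to a fresh engine. The hand-off routine below is an illustrative assumption, not part of this patch; only SearchTreeState, storeSearchTreeState and restoreSearchTreeState come from this interface:

#include "IEngine.h"
#include "SearchTreeState.h"

// Hypothetical hand-off: snapshot one engine's case-split stack and replay it
// on another. restoreSearchTreeState returns false if replaying the stack
// already derives UNSAT, in which case the receiving engine has nothing to do.
bool handOffSearch( IEngine &source, IEngine &target )
{
    SearchTreeState state;
    source.storeSearchTreeState( state );
    return target.restoreSearchTreeState( state );
}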
@@ -122,8 +122,8 @@ class IEngine virtual double explainBound( unsigned var, bool isUpper ) const = 0; /* - * Update the ground bounds - */ + Update the ground bounds + */ virtual void updateGroundUpperBound( unsigned var, double value ) = 0; virtual void updateGroundLowerBound( unsigned var, double value ) = 0; @@ -190,7 +190,9 @@ class IEngine /* Add lemma to the UNSAT Certificate */ - virtual void addPLCLemma( std::shared_ptr &explanation ) = 0; + virtual void incNumOfLemmas() = 0; + + virtual const List *getPiecewiseLinearConstraints() const = 0; }; #endif // __IEngine_h__ diff --git a/src/engine/LeakyReluConstraint.cpp b/src/engine/LeakyReluConstraint.cpp index 28aec9033c..388caf52b2 100644 --- a/src/engine/LeakyReluConstraint.cpp +++ b/src/engine/LeakyReluConstraint.cpp @@ -15,11 +15,9 @@ #include "LeakyReluConstraint.h" #include "Debug.h" -#include "DivideStrategy.h" #include "FloatUtils.h" #include "GlobalConfiguration.h" #include "ITableau.h" -#include "InfeasibleQueryException.h" #include "MStringf.h" #include "MarabouError.h" #include "PiecewiseLinearCaseSplit.h" @@ -193,8 +191,14 @@ void LeakyReluConstraint::notifyLowerBound( unsigned variable, double bound ) { // If we're in the active phase, activeAux should be 0 if ( proofs ) - _boundManager->addLemmaExplanationAndTightenBound( - _activeAux, 0, Tightening::UB, { variable }, Tightening::LB, getType() ); + _boundManager->addLemmaExplanationAndTightenBound( _activeAux, + 0, + Tightening::UB, + { variable }, + Tightening::LB, + *this, + true, + 0 ); else if ( !proofs && _auxVarsInUse ) _boundManager->tightenUpperBound( _activeAux, 0 ); @@ -209,8 +213,14 @@ void LeakyReluConstraint::notifyLowerBound( unsigned variable, double bound ) else if ( variable == _f && FloatUtils::isNegative( bound ) ) { if ( proofs ) - _boundManager->addLemmaExplanationAndTightenBound( - _b, bound / _slope, Tightening::LB, { _f }, Tightening::LB, getType() ); + _boundManager->addLemmaExplanationAndTightenBound( _b, + bound / _slope, + Tightening::LB, + { _f }, + Tightening::LB, + *this, + false, + bound ); else _boundManager->tightenLowerBound( _b, bound / _slope, *_inactiveTighteningRow ); } @@ -221,8 +231,14 @@ void LeakyReluConstraint::notifyLowerBound( unsigned variable, double bound ) { // Inactive phase if ( proofs ) - _boundManager->addLemmaExplanationAndTightenBound( - _inactiveAux, 0, Tightening::UB, { _activeAux }, Tightening::LB, getType() ); + _boundManager->addLemmaExplanationAndTightenBound( _inactiveAux, + 0, + Tightening::UB, + { _activeAux }, + Tightening::LB, + *this, + true, + 0 ); else _boundManager->tightenUpperBound( _inactiveAux, 0 ); } @@ -232,8 +248,14 @@ void LeakyReluConstraint::notifyLowerBound( unsigned variable, double bound ) { // Active phase if ( proofs ) - _boundManager->addLemmaExplanationAndTightenBound( - _activeAux, 0, Tightening::UB, { _inactiveAux }, Tightening::LB, getType() ); + _boundManager->addLemmaExplanationAndTightenBound( _activeAux, + 0, + Tightening::UB, + { _inactiveAux }, + Tightening::LB, + *this, + true, + 0 ); else _boundManager->tightenUpperBound( _activeAux, 0 ); } @@ -270,8 +292,14 @@ void LeakyReluConstraint::notifyUpperBound( unsigned variable, double bound ) { unsigned partner = ( variable == _f ) ? 
_b : _f; if ( proofs ) - _boundManager->addLemmaExplanationAndTightenBound( - partner, bound, Tightening::UB, { variable }, Tightening::UB, getType() ); + _boundManager->addLemmaExplanationAndTightenBound( partner, + bound, + Tightening::UB, + { variable }, + Tightening::UB, + *this, + false, + bound ); else _boundManager->tightenUpperBound( partner, bound ); } @@ -280,7 +308,7 @@ void LeakyReluConstraint::notifyUpperBound( unsigned variable, double bound ) // A negative upper bound of b implies inactive phase if ( proofs && _auxVarsInUse ) _boundManager->addLemmaExplanationAndTightenBound( - _inactiveAux, 0, Tightening::UB, { _b }, Tightening::UB, getType() ); + _inactiveAux, 0, Tightening::UB, { _b }, Tightening::UB, *this, true, 0 ); _boundManager->tightenUpperBound( _f, _slope * bound, *_inactiveTighteningRow ); } @@ -289,7 +317,7 @@ void LeakyReluConstraint::notifyUpperBound( unsigned variable, double bound ) // A negative upper bound of f implies inactive phase as well if ( proofs && _auxVarsInUse ) _boundManager->addLemmaExplanationAndTightenBound( - _inactiveAux, 0, Tightening::UB, { _f }, Tightening::UB, getType() ); + _inactiveAux, 0, Tightening::UB, { _f }, Tightening::UB, *this, true, 0 ); _boundManager->tightenUpperBound( _b, bound / _slope, *_inactiveTighteningRow ); } @@ -403,7 +431,7 @@ List LeakyReluConstraint::getSmartFixes( ITablea List LeakyReluConstraint::getCaseSplits() const { - if ( _phaseStatus != PHASE_NOT_FIXED ) + if ( getPhaseStatus() != PHASE_NOT_FIXED ) throw MarabouError( MarabouError::REQUESTED_CASE_SPLITS_FROM_FIXED_CONSTRAINT ); List splits; @@ -525,14 +553,14 @@ PiecewiseLinearCaseSplit LeakyReluConstraint::getActiveSplit() const bool LeakyReluConstraint::phaseFixed() const { - return _phaseStatus != PHASE_NOT_FIXED; + return getPhaseStatus() != PHASE_NOT_FIXED; } PiecewiseLinearCaseSplit LeakyReluConstraint::getImpliedCaseSplit() const { - ASSERT( _phaseStatus != PHASE_NOT_FIXED ); + ASSERT( getPhaseStatus() != PHASE_NOT_FIXED ); - if ( _phaseStatus == RELU_PHASE_ACTIVE ) + if ( getPhaseStatus() == RELU_PHASE_ACTIVE ) return getActiveSplit(); return getInactiveSplit(); @@ -551,8 +579,8 @@ void LeakyReluConstraint::dump( String &output ) const _b, _slope, _constraintActive ? 
"Yes" : "No", - _phaseStatus, - phaseToString( _phaseStatus ).ascii() ); + getPhaseStatus(), + phaseToString( getPhaseStatus() ).ascii() ); output += Stringf( "b in [%s, %s], ", @@ -625,18 +653,18 @@ void LeakyReluConstraint::eliminateVariable( __attribute__( ( unused ) ) unsigne { if ( FloatUtils::gt( fixedValue, 0 ) ) { - ASSERT( _phaseStatus != RELU_PHASE_INACTIVE ); + ASSERT( getPhaseStatus() != RELU_PHASE_INACTIVE ); } else if ( FloatUtils::lt( fixedValue, 0 ) ) { - ASSERT( _phaseStatus != RELU_PHASE_ACTIVE ); + ASSERT( getPhaseStatus() != RELU_PHASE_ACTIVE ); } } else if ( variable == _activeAux ) { if ( FloatUtils::isPositive( fixedValue ) ) { - ASSERT( _phaseStatus != RELU_PHASE_ACTIVE ); + ASSERT( getPhaseStatus() != RELU_PHASE_ACTIVE ); } } else @@ -644,7 +672,7 @@ void LeakyReluConstraint::eliminateVariable( __attribute__( ( unused ) ) unsigne // This is the inactive aux variable if ( FloatUtils::isPositive( fixedValue ) ) { - ASSERT( _phaseStatus != RELU_PHASE_INACTIVE ); + ASSERT( getPhaseStatus() != RELU_PHASE_INACTIVE ); } } } ); diff --git a/src/engine/MaxConstraint.cpp b/src/engine/MaxConstraint.cpp index 13571c44a5..71d67d37a4 100644 --- a/src/engine/MaxConstraint.cpp +++ b/src/engine/MaxConstraint.cpp @@ -186,8 +186,8 @@ void MaxConstraint::notifyLowerBound( unsigned variable, double value ) } if ( phaseFixed() ) - _phaseStatus = ( _haveFeasibleEliminatedPhases ? MAX_PHASE_ELIMINATED - : variableToPhase( *_elements.begin() ) ); + setPhaseStatus( ( _haveFeasibleEliminatedPhases ? MAX_PHASE_ELIMINATED + : variableToPhase( *_elements.begin() ) ) ); if ( isActive() && _boundManager ) { @@ -247,8 +247,8 @@ void MaxConstraint::notifyUpperBound( unsigned variable, double value ) } if ( phaseFixed() ) - _phaseStatus = ( _haveFeasibleEliminatedPhases ? MAX_PHASE_ELIMINATED - : variableToPhase( *_elements.begin() ) ); + setPhaseStatus( ( _haveFeasibleEliminatedPhases ? MAX_PHASE_ELIMINATED + : variableToPhase( *_elements.begin() ) ) ); // There is no need to recompute the max lower bound and max index here. @@ -481,8 +481,8 @@ void MaxConstraint::updateVariableIndex( unsigned oldIndex, unsigned newIndex ) _elementToAux[newIndex] = auxVar; _auxToElement[auxVar] = newIndex; - if ( _phaseStatus == variableToPhase( oldIndex ) ) - _phaseStatus = variableToPhase( newIndex ); + if ( getPhaseStatus() == variableToPhase( oldIndex ) ) + setPhaseStatus( variableToPhase( newIndex ) ); } else { @@ -533,8 +533,8 @@ void MaxConstraint::eliminateVariable( unsigned var, double value ) } if ( phaseFixed() ) - _phaseStatus = ( _haveFeasibleEliminatedPhases ? MAX_PHASE_ELIMINATED - : variableToPhase( *_elements.begin() ) ); + setPhaseStatus( _haveFeasibleEliminatedPhases ? 
MAX_PHASE_ELIMINATED + : variableToPhase( *_elements.begin() ) ); if ( _elements.size() == 0 ) _obsolete = true; @@ -739,7 +739,7 @@ void MaxConstraint::addTableauAuxVar( unsigned tableauAuxVar, unsigned constrain _elementToTighteningRow[element] = nullptr; } -void MaxConstraint::applyTightenings( const List &tightenings ) const +void MaxConstraint::applyTightenings( const List &tightenings ) { bool proofs = _boundManager && _boundManager->shouldProduceProofs(); @@ -781,7 +781,9 @@ void MaxConstraint::applyTightenings( const List &tightenings ) cons Tightening::UB, getElements(), Tightening::UB, - getType() ); + *this, + false, + tightening._value ); else { ASSERT( _elements.exists( tightening._variable ) ); diff --git a/src/engine/MaxConstraint.h b/src/engine/MaxConstraint.h index 74445e5cf4..0b4f324dc6 100644 --- a/src/engine/MaxConstraint.h +++ b/src/engine/MaxConstraint.h @@ -301,7 +301,7 @@ class MaxConstraint : public PiecewiseLinearConstraint /* Apply tightenings in the list, discovered by getEntailedTightenings */ - void applyTightenings( const List &tightenings ) const; + void applyTightenings( const List &tightenings ); }; #endif // __MaxConstraint_h__ diff --git a/src/engine/PiecewiseLinearConstraint.cpp b/src/engine/PiecewiseLinearConstraint.cpp index 400d9ba01c..a76bc4ea5e 100644 --- a/src/engine/PiecewiseLinearConstraint.cpp +++ b/src/engine/PiecewiseLinearConstraint.cpp @@ -47,6 +47,7 @@ PiecewiseLinearConstraint::PiecewiseLinearConstraint( unsigned numCases ) , _score( FloatUtils::negativeInfinity() ) , _statistics( NULL ) , _gurobi( NULL ) + , _tableauAuxVars() { } diff --git a/src/engine/PiecewiseLinearConstraint.h b/src/engine/PiecewiseLinearConstraint.h index 4304baa019..544ebad167 100644 --- a/src/engine/PiecewiseLinearConstraint.h +++ b/src/engine/PiecewiseLinearConstraint.h @@ -204,7 +204,7 @@ class PiecewiseLinearConstraint : public ITableau::VariableWatcher /* If the constraint's phase has been fixed, get the (valid) case split. Transitioning from Valid to Implied with integration of - context-dependentSMTCore. + context-dependent SearchTreeHandler. */ virtual PiecewiseLinearCaseSplit getValidCaseSplit() const = 0; virtual PiecewiseLinearCaseSplit getImpliedCaseSplit() const = 0; @@ -363,6 +363,7 @@ class PiecewiseLinearConstraint : public ITableau::VariableWatcher { _tableau = tableau; } + /* Method to set PhaseStatus of the constraint. Encapsulates both context dependent and context-less behavior. Initialized to PHASE_NOT_FIXED. @@ -509,7 +510,8 @@ class PiecewiseLinearConstraint : public ITableau::VariableWatcher Map _upperBounds; IBoundManager *_boundManager; // Pointer to a centralized object to store bounds. 
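A note on the getPhaseStatus()/setPhaseStatus() calls that this patch substitutes for direct _phaseStatus reads throughout the constraint classes: per the comment above, the accessors encapsulate both context-dependent and context-less behavior. A minimal sketch of that dispatch, assuming a CDO<PhaseStatus> member named _cdPhaseStatus analogous to _cdConstraintActive below (the member name and body are assumptions, not quoted from the sources):

// Sketch: before the constraint is registered with a Context, the plain
// member holds the phase; afterwards a context-dependent object does, so
// that phase changes are rolled back automatically on context pops.
PhaseStatus PiecewiseLinearConstraint::getPhaseStatus() const
{
    return _cdPhaseStatus ? *_cdPhaseStatus : _phaseStatus;
}

void PiecewiseLinearConstraint::setPhaseStatus( PhaseStatus phaseStatus )
{
    if ( _cdPhaseStatus )
        *_cdPhaseStatus = phaseStatus;
    else
        _phaseStatus = phaseStatus;
}

This is why, for example, ReluConstraint::phaseFixed() below now reads getPhaseStatus() != PHASE_NOT_FIXED instead of touching the raw member.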
-    ITableau *_tableau; // Pointer to tableau which simulates CBT until we switch to CDSmtCore
+    ITableau *_tableau; // Pointer to tableau which simulates CBT until we switch to
+                        // CDSearchTreeHandler

    CVC4::context::Context *_context;
    CVC4::context::CDO<bool> *_cdConstraintActive;
diff --git a/src/engine/PolygonalTightening.h b/src/engine/PolygonalTightening.h
index 2a941c37f1..2eddeb176f 100644
--- a/src/engine/PolygonalTightening.h
+++ b/src/engine/PolygonalTightening.h
@@ -16,6 +16,7 @@
 #define __PolygonalTightening_h__

 #include "FloatUtils.h"
+#include "MStringf.h"
 #include "Map.h"
 #include "NeuronIndex.h"

@@ -89,20 +90,35 @@ class PolygonalTightening
    void dump() const
    {
-        printf( "PolygonalTightening: " );
+        String output = "PolygonalTightening: ";
+        unsigned count = 0;
        for ( const auto &pair : _neuronToCoefficient )
        {
            double coeff = pair.second;
            if ( FloatUtils::isZero( coeff ) )
                continue;
-            printf( "%s %.2lf neuron%u_%u ",
-                    coeff > 0 ? "+" : "-",
-                    FloatUtils::abs( coeff ),
-                    pair.first._layer,
-                    pair.first._neuron );
+            if ( count )
+            {
+                output += Stringf( "%s %.2lf neuron%u_%u ",
+                                   FloatUtils::isPositive( coeff ) ? "+" : "-",
+                                   FloatUtils::abs( coeff ),
+                                   pair.first._layer,
+                                   pair.first._neuron );
+            }
+            else
+            {
+                output +=
+                    Stringf( "%.2lf neuron%u_%u ", coeff, pair.first._layer, pair.first._neuron );
+            }
+            ++count;
+        }
+        if ( count == 0 )
+        {
+            output += Stringf( "%.2lf ", 0.0 );
        }
-        printf( "%s %.2lf\n", _type == LB ? ">=" : "<=", _value );
+        output += Stringf( "%s %.2lf", _type == LB ? ">=" : "<=", _value );
+        printf( "%s\n", output.ascii() );
    }
};

-#endif // __PolygonalTightening_h__s
+#endif // __PolygonalTightening_h__
diff --git a/src/engine/PrecisionRestorer.cpp b/src/engine/PrecisionRestorer.cpp
index 6d75420e8d..ce63b7b993 100644
--- a/src/engine/PrecisionRestorer.cpp
+++ b/src/engine/PrecisionRestorer.cpp
@@ -19,7 +19,7 @@
 #include "FloatUtils.h"
 #include "MalformedBasisException.h"
 #include "MarabouError.h"
-#include "SmtCore.h"
+#include "SearchTreeHandler.h"
 #include "TableauStateStorageLevel.h"
 #include "UnsatCertificateNode.h"

@@ -35,9 +35,16 @@ void PrecisionRestorer::restoreInitialEngineState( IEngine &engine )

 void PrecisionRestorer::restorePrecision( IEngine &engine,
                                           ITableau &tableau,
-                                          SmtCore &smtCore,
+                                          SearchTreeHandler &searchTreeHandler,
                                           RestoreBasics restoreBasics )
 {
+    Map<const PiecewiseLinearConstraint *, Pair<PhaseStatus, bool>> plcStatusBefore;
+    DEBUG( {
+        for ( const auto *plc : *engine.getPiecewiseLinearConstraints() )
+            plcStatusBefore.insert(
+                plc, Pair<PhaseStatus, bool>( plc->getPhaseStatus(), plc->isActive() ) );
+    } );
+
     // Store the dimensions, bounds and basic variables in the current tableau,
     // before restoring it
     unsigned targetM = tableau.getM();
@@ -77,7 +84,7 @@ void PrecisionRestorer::restorePrecision( IEngine &engine,

     // Store the case splits performed so far
     List<PiecewiseLinearCaseSplit> targetSplits;
-    smtCore.allSplitsSoFar( targetSplits );
+    searchTreeHandler.allSplitsSoFar( targetSplits );

     // Restore engine and tableau to their original form
     engine.restoreState( _initialEngineState );
@@ -87,7 +94,7 @@ void PrecisionRestorer::restorePrecision( IEngine &engine,
     // At this point, the tableau has the appropriate dimensions. Restore the
     // variable bounds and basic variables. Note that if column merging is
     // enabled, the dimensions may not be precisely those before the
-    // resotration, because merging sometimes fails - in which case an equation
+    // restoration, because merging sometimes fails - in which case an equation
     // is added. If we fail to restore the dimensions, we cannot restore the
     // basics.
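Taken together with the assertion loop in the next hunk, the DEBUG block added above enforces a single invariant. Written out as a self-contained sketch, with container types reconstructed from the surrounding diff rather than quoted:

#include "IEngine.h"
#include "Map.h"
#include "Pair.h"

// Sketch of the invariant checked in DEBUG mode: restoring precision must not
// change any piecewise-linear constraint's phase status or active/inactive flag.
typedef Map<const PiecewiseLinearConstraint *, Pair<PhaseStatus, bool>> PlcSnapshot;

PlcSnapshot snapshotConstraints( IEngine &engine )
{
    PlcSnapshot snapshot;
    for ( const auto *plc : *engine.getPiecewiseLinearConstraints() )
        snapshot.insert( plc,
                         Pair<PhaseStatus, bool>( plc->getPhaseStatus(), plc->isActive() ) );
    return snapshot;
}

void checkConstraintsUnchanged( IEngine &engine, const PlcSnapshot &before )
{
    for ( const auto *plc : *engine.getPiecewiseLinearConstraints() )
    {
        ASSERT( plc->getPhaseStatus() == before.get( plc ).first() );
        ASSERT( plc->isActive() == before.get( plc ).second() );
    }
}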
@@ -163,14 +170,10 @@ void PrecisionRestorer::restorePrecision( IEngine &engine, ASSERT( GlobalConfiguration::USE_COLUMN_MERGING_EQUATIONS || tableau.getN() == targetN ); ASSERT( GlobalConfiguration::USE_COLUMN_MERGING_EQUATIONS || tableau.getM() == targetM ); - // Constraints should be in the same state before and after restoration - for ( const auto &pair : targetEngineState._plConstraintToState ) + for ( const auto *plc : *engine.getPiecewiseLinearConstraints() ) { - ASSERT( pair.second->isActive() == pair.first->isActive() ); - // Only active constraints need to be synchronized - ASSERT( !pair.second->isActive() || - pair.second->phaseFixed() == pair.first->phaseFixed() ); - ASSERT( pair.second->constraintObsolete() == pair.first->constraintObsolete() ); + ASSERT( plc->getPhaseStatus() == plcStatusBefore.get( plc ).first() ); + ASSERT( plc->isActive() == plcStatusBefore.get( plc ).second() ); } EngineState currentEngineState; diff --git a/src/engine/PrecisionRestorer.h b/src/engine/PrecisionRestorer.h index 3fe0c6683b..3f058e1231 100644 --- a/src/engine/PrecisionRestorer.h +++ b/src/engine/PrecisionRestorer.h @@ -18,7 +18,7 @@ #include "EngineState.h" -class SmtCore; +class SearchTreeHandler; class PrecisionRestorer { @@ -33,7 +33,7 @@ class PrecisionRestorer void restorePrecision( IEngine &engine, ITableau &tableau, - SmtCore &smtCore, + SearchTreeHandler &searchTreeHandler, RestoreBasics restoreBasics ); private: diff --git a/src/engine/ReluConstraint.cpp b/src/engine/ReluConstraint.cpp index d48be10827..139e8563e4 100644 --- a/src/engine/ReluConstraint.cpp +++ b/src/engine/ReluConstraint.cpp @@ -173,7 +173,7 @@ void ReluConstraint::notifyLowerBound( unsigned variable, double newBound ) // If we're in the active phase, aux should be 0 if ( proofs && _auxVarInUse ) _boundManager->addLemmaExplanationAndTightenBound( - _aux, 0, Tightening::UB, { variable }, Tightening::LB, getType() ); + _aux, 0, Tightening::UB, { variable }, Tightening::LB, *this, true, 0 ); else if ( !proofs && _auxVarInUse ) _boundManager->tightenUpperBound( _aux, 0 ); @@ -187,7 +187,7 @@ void ReluConstraint::notifyLowerBound( unsigned variable, double newBound ) { if ( proofs && _auxVarInUse ) _boundManager->addLemmaExplanationAndTightenBound( - _aux, 0, Tightening::UB, { variable }, Tightening::LB, getType() ); + _aux, 0, Tightening::UB, { variable }, Tightening::LB, *this, true, 0 ); else if ( !proofs && _auxVarInUse ) _boundManager->tightenUpperBound( _aux, 0 ); } @@ -198,7 +198,7 @@ void ReluConstraint::notifyLowerBound( unsigned variable, double newBound ) { if ( proofs ) _boundManager->addLemmaExplanationAndTightenBound( - _f, 0, Tightening::UB, { variable }, Tightening::LB, getType() ); + _f, 0, Tightening::UB, { variable }, Tightening::LB, *this, true, 0 ); else _boundManager->tightenUpperBound( _f, 0 ); @@ -212,11 +212,17 @@ void ReluConstraint::notifyLowerBound( unsigned variable, double newBound ) if ( proofs ) { // If already inactive, tightening is linear - if ( _phaseStatus == RELU_PHASE_INACTIVE ) + if ( getPhaseStatus() == RELU_PHASE_INACTIVE ) _boundManager->tightenUpperBound( _aux, -bound, *_tighteningRow ); - else if ( _phaseStatus == PHASE_NOT_FIXED ) - _boundManager->addLemmaExplanationAndTightenBound( - _aux, -bound, Tightening::UB, { variable }, Tightening::LB, getType() ); + else if ( getPhaseStatus() == PHASE_NOT_FIXED ) + _boundManager->addLemmaExplanationAndTightenBound( _aux, + -bound, + Tightening::UB, + { variable }, + Tightening::LB, + *this, + false, + bound ); } else 
_boundManager->tightenUpperBound( _aux, -bound ); @@ -228,7 +234,7 @@ void ReluConstraint::notifyLowerBound( unsigned variable, double newBound ) { if ( proofs ) _boundManager->addLemmaExplanationAndTightenBound( - _f, 0, Tightening::LB, { variable }, Tightening::LB, getType() ); + _f, 0, Tightening::LB, { variable }, Tightening::LB, *this, false, 0 ); else _boundManager->tightenLowerBound( _f, 0 ); } @@ -267,13 +273,19 @@ void ReluConstraint::notifyUpperBound( unsigned variable, double newBound ) { if ( proofs ) { - if ( _phaseStatus != RELU_PHASE_INACTIVE ) + if ( getPhaseStatus() != RELU_PHASE_INACTIVE ) _boundManager->tightenUpperBound( _b, bound, *_tighteningRow ); else { if ( !FloatUtils::isPositive( bound ) ) - _boundManager->addLemmaExplanationAndTightenBound( - _b, 0, Tightening::UB, { variable }, Tightening::UB, getType() ); + _boundManager->addLemmaExplanationAndTightenBound( _b, + 0, + Tightening::UB, + { variable }, + Tightening::UB, + *this, + true, + 0 ); // Bound cannot be negative if ReLU is inactive if ( FloatUtils::isNegative( bound ) ) throw InfeasibleQueryException(); @@ -289,7 +301,7 @@ void ReluConstraint::notifyUpperBound( unsigned variable, double newBound ) // If b has a non-positive upper bound, f's upper bound is 0 if ( proofs ) _boundManager->addLemmaExplanationAndTightenBound( - _f, 0, Tightening::UB, { variable }, Tightening::UB, getType() ); + _f, 0, Tightening::UB, { variable }, Tightening::UB, *this, true, 0 ); else _boundManager->tightenUpperBound( _f, 0 ); @@ -304,15 +316,17 @@ void ReluConstraint::notifyUpperBound( unsigned variable, double newBound ) if ( proofs ) { // If already inactive, tightening is linear - if ( _phaseStatus == RELU_PHASE_ACTIVE ) + if ( getPhaseStatus() == RELU_PHASE_ACTIVE ) _boundManager->tightenUpperBound( _f, bound, *_tighteningRow ); - else if ( _phaseStatus == PHASE_NOT_FIXED ) + else if ( getPhaseStatus() == PHASE_NOT_FIXED ) _boundManager->addLemmaExplanationAndTightenBound( _f, bound, Tightening::UB, { variable }, Tightening::UB, - getType() ); + *this, + false, + bound ); } else _boundManager->tightenUpperBound( _f, bound ); @@ -322,13 +336,19 @@ void ReluConstraint::notifyUpperBound( unsigned variable, double newBound ) { if ( proofs ) { - if ( _phaseStatus != RELU_PHASE_ACTIVE ) + if ( getPhaseStatus() != RELU_PHASE_ACTIVE ) _boundManager->tightenLowerBound( _b, -bound, *_tighteningRow ); else { if ( !FloatUtils::isPositive( bound ) ) - _boundManager->addLemmaExplanationAndTightenBound( - _b, 0, Tightening::LB, { variable }, Tightening::UB, getType() ); + _boundManager->addLemmaExplanationAndTightenBound( _b, + 0, + Tightening::LB, + { variable }, + Tightening::UB, + *this, + true, + 0 ); // Bound cannot be negative if ReLU is active if ( FloatUtils::isNegative( bound ) ) throw InfeasibleQueryException(); @@ -554,7 +574,7 @@ List ReluConstraint::getSmartFixes( ITableau *ta List ReluConstraint::getCaseSplits() const { - if ( _phaseStatus != PHASE_NOT_FIXED ) + if ( getPhaseStatus() != PHASE_NOT_FIXED ) throw MarabouError( MarabouError::REQUESTED_CASE_SPLITS_FROM_FIXED_CONSTRAINT ); List splits; @@ -664,14 +684,14 @@ PiecewiseLinearCaseSplit ReluConstraint::getActiveSplit() const bool ReluConstraint::phaseFixed() const { - return _phaseStatus != PHASE_NOT_FIXED; + return getPhaseStatus() != PHASE_NOT_FIXED; } PiecewiseLinearCaseSplit ReluConstraint::getImpliedCaseSplit() const { - ASSERT( _phaseStatus != PHASE_NOT_FIXED ); + ASSERT( getPhaseStatus() != PHASE_NOT_FIXED ); - if ( _phaseStatus == RELU_PHASE_ACTIVE ) + if ( 
getPhaseStatus() == RELU_PHASE_ACTIVE ) return getActiveSplit(); return getInactiveSplit(); @@ -688,8 +708,8 @@ void ReluConstraint::dump( String &output ) const _f, _b, _constraintActive ? "Yes" : "No", - _phaseStatus, - phaseToString( _phaseStatus ).ascii() ); + getPhaseStatus(), + phaseToString( getPhaseStatus() ).ascii() ); output += Stringf( "b in [%s, %s], ", @@ -756,11 +776,11 @@ void ReluConstraint::eliminateVariable( __attribute__( ( unused ) ) unsigned var { if ( FloatUtils::gt( fixedValue, 0 ) ) { - ASSERT( _phaseStatus != RELU_PHASE_INACTIVE ); + ASSERT( getPhaseStatus() != RELU_PHASE_INACTIVE ); } else if ( FloatUtils::lt( fixedValue, 0 ) ) { - ASSERT( _phaseStatus != RELU_PHASE_ACTIVE ); + ASSERT( getPhaseStatus() != RELU_PHASE_ACTIVE ); } } else @@ -768,7 +788,7 @@ void ReluConstraint::eliminateVariable( __attribute__( ( unused ) ) unsigned var // This is the aux variable if ( FloatUtils::isPositive( fixedValue ) ) { - ASSERT( _phaseStatus != RELU_PHASE_ACTIVE ); + ASSERT( getPhaseStatus() != RELU_PHASE_ACTIVE ); } } } ); diff --git a/src/engine/SmtCore.cpp b/src/engine/SearchTreeHandler.cpp similarity index 86% rename from src/engine/SmtCore.cpp rename to src/engine/SearchTreeHandler.cpp index bdf75aadaa..9fc319d5a7 100644 --- a/src/engine/SmtCore.cpp +++ b/src/engine/SearchTreeHandler.cpp @@ -1,5 +1,5 @@ /********************* */ -/*! \file SmtCore.cpp +/*! \file SearchTreeHandler.cpp ** \verbatim ** Top contributors (to current version): ** Guy Katz, Parth Shah, Duligur Ibeling @@ -11,12 +11,11 @@ ** ** [[ Add lengthier description here ]] - **/ +**/ -#include "SmtCore.h" +#include "SearchTreeHandler.h" #include "Debug.h" -#include "DivideStrategy.h" #include "EngineState.h" #include "FloatUtils.h" #include "GlobalConfiguration.h" @@ -25,10 +24,9 @@ #include "MarabouError.h" #include "Options.h" #include "PseudoImpactTracker.h" -#include "ReluConstraint.h" #include "UnsatCertificateNode.h" -SmtCore::SmtCore( IEngine *engine ) +SearchTreeHandler::SearchTreeHandler( IEngine *engine ) : _statistics( NULL ) , _engine( engine ) , _context( _engine->getContext() ) @@ -44,12 +42,12 @@ SmtCore::SmtCore( IEngine *engine ) { } -SmtCore::~SmtCore() +SearchTreeHandler::~SearchTreeHandler() { freeMemory(); } -void SmtCore::freeMemory() +void SearchTreeHandler::freeMemory() { for ( const auto &stackEntry : _stack ) { @@ -60,7 +58,7 @@ void SmtCore::freeMemory() _stack.clear(); } -void SmtCore::reset() +void SearchTreeHandler::reset() { _context.popto( 0 ); _engine->postContextPopHook(); @@ -73,7 +71,7 @@ void SmtCore::reset() _numRejectedPhasePatternProposal = 0; } -void SmtCore::reportViolatedConstraint( PiecewiseLinearConstraint *constraint ) +void SearchTreeHandler::reportViolatedConstraint( PiecewiseLinearConstraint *constraint ) { if ( !_constraintToViolationCount.exists( constraint ) ) _constraintToViolationCount[constraint] = 0; @@ -90,7 +88,7 @@ void SmtCore::reportViolatedConstraint( PiecewiseLinearConstraint *constraint ) } } -unsigned SmtCore::getViolationCounts( PiecewiseLinearConstraint *constraint ) const +unsigned SearchTreeHandler::getViolationCounts( PiecewiseLinearConstraint *constraint ) const { if ( !_constraintToViolationCount.exists( constraint ) ) return 0; @@ -98,7 +96,7 @@ unsigned SmtCore::getViolationCounts( PiecewiseLinearConstraint *constraint ) co return _constraintToViolationCount[constraint]; } -void SmtCore::initializeScoreTrackerIfNeeded( +void SearchTreeHandler::initializeScoreTrackerIfNeeded( const List &plConstraints ) { if ( 
GlobalConfiguration::USE_DEEPSOI_LOCAL_SEARCH ) @@ -106,11 +104,11 @@ void SmtCore::initializeScoreTrackerIfNeeded( _scoreTracker = std::unique_ptr( new PseudoImpactTracker() ); _scoreTracker->initialize( plConstraints ); - SMT_LOG( "\tTracking Pseudo Impact..." ); + SEARCH_TREE_LOG( "\tTracking Pseudo Impact..." ); } } -void SmtCore::reportRejectedPhasePatternProposal() +void SearchTreeHandler::reportRejectedPhasePatternProposal() { ++_numRejectedPhasePatternProposal; @@ -126,12 +124,12 @@ void SmtCore::reportRejectedPhasePatternProposal() } } -bool SmtCore::needToSplit() const +bool SearchTreeHandler::needToSplit() const { return _needToSplit; } -void SmtCore::performSplit() +void SearchTreeHandler::performSplit() { ASSERT( _needToSplit ); @@ -182,7 +180,7 @@ void SmtCore::performSplit() new UnsatCertificateNode( certificateNode, childSplit ); } - SmtStackEntry *stackEntry = new SmtStackEntry; + SearchTreeStackEntry *stackEntry = new SearchTreeStackEntry; // Perform the first split: add bounds and equations List::iterator split = splits.begin(); ASSERT( split->getEquations().size() == 0 ); @@ -218,21 +216,21 @@ void SmtCore::performSplit() if ( level > _statistics->getUnsignedAttribute( Statistics::MAX_DECISION_LEVEL ) ) _statistics->setUnsignedAttribute( Statistics::MAX_DECISION_LEVEL, level ); struct timespec end = TimeUtils::sampleMicro(); - _statistics->incLongAttribute( Statistics::TOTAL_TIME_SMT_CORE_MICRO, + _statistics->incLongAttribute( Statistics::TOTAL_TIME_SEARCH_TREE_HANDLER_MICRO, TimeUtils::timePassed( start, end ) ); } _constraintForSplitting = NULL; } -unsigned SmtCore::getStackDepth() const +unsigned SearchTreeHandler::getStackDepth() const { ASSERT( ( _engine->inSnCMode() || _stack.size() == static_cast( _context.getLevel() ) ) ); return _stack.size(); } -void SmtCore::popContext() +void SearchTreeHandler::popContext() { struct timespec start = TimeUtils::sampleMicro(); _context.pop(); @@ -246,7 +244,7 @@ void SmtCore::popContext() } } -void SmtCore::pushContext() +void SearchTreeHandler::pushContext() { struct timespec start = TimeUtils::sampleMicro(); _context.push(); @@ -260,9 +258,9 @@ void SmtCore::pushContext() } } -bool SmtCore::popSplit() +bool SearchTreeHandler::popSplit() { - SMT_LOG( "Performing a pop" ); + SEARCH_TREE_LOG( "Performing a pop" ); if ( _stack.empty() ) return false; @@ -300,6 +298,7 @@ bool SmtCore::popSplit() { UnsatCertificateNode *certificateNode = _engine->getUNSATCertificateCurrentPointer(); + certificateNode->deleteUnusedLemmas(); _engine->setUNSATCertificateCurrentPointer( certificateNode->getParent() ); } @@ -314,14 +313,17 @@ bool SmtCore::popSplit() throw MarabouError( MarabouError::DEBUGGING_ERROR ); } - SmtStackEntry *stackEntry = _stack.back(); + SearchTreeStackEntry *stackEntry = _stack.back(); + + if ( _engine->shouldProduceProofs() && _engine->getUNSATCertificateCurrentPointer() ) + _engine->getUNSATCertificateCurrentPointer()->deleteUnusedLemmas(); popContext(); _engine->postContextPopHook(); // Restore the state of the engine - SMT_LOG( "\tRestoring engine state..." ); + SEARCH_TREE_LOG( "\tRestoring engine state..." 
); _engine->restoreState( *( stackEntry->_engineState ) ); - SMT_LOG( "\tRestoring engine state - DONE" ); + SEARCH_TREE_LOG( "\tRestoring engine state - DONE" ); // Apply the new split and erase it from the list auto split = stackEntry->_alternativeSplits.begin(); @@ -339,6 +341,7 @@ bool SmtCore::popSplit() UnsatCertificateNode *splitChild = certificateNode->getChildBySplit( *split ); while ( !splitChild ) { + certificateNode->deleteUnusedLemmas(); certificateNode = certificateNode->getParent(); ASSERT( certificateNode ); splitChild = certificateNode->getChildBySplit( *split ); @@ -348,12 +351,12 @@ bool SmtCore::popSplit() ASSERT( _engine->getUNSATCertificateCurrentPointer()->getSplit() == *split ); } - SMT_LOG( "\tApplying new split..." ); + SEARCH_TREE_LOG( "\tApplying new split..." ); ASSERT( split->getEquations().size() == 0 ); _engine->preContextPushHook(); pushContext(); _engine->applySplit( *split ); - SMT_LOG( "\tApplying new split - DONE" ); + SEARCH_TREE_LOG( "\tApplying new split - DONE" ); stackEntry->_activeSplit = *split; stackEntry->_alternativeSplits.erase( split ); @@ -371,7 +374,7 @@ bool SmtCore::popSplit() if ( level > _statistics->getUnsignedAttribute( Statistics::MAX_DECISION_LEVEL ) ) _statistics->setUnsignedAttribute( Statistics::MAX_DECISION_LEVEL, level ); struct timespec end = TimeUtils::sampleMicro(); - _statistics->incLongAttribute( Statistics::TOTAL_TIME_SMT_CORE_MICRO, + _statistics->incLongAttribute( Statistics::TOTAL_TIME_SEARCH_TREE_HANDLER_MICRO, TimeUtils::timePassed( start, end ) ); } @@ -380,14 +383,14 @@ bool SmtCore::popSplit() return true; } -void SmtCore::resetSplitConditions() +void SearchTreeHandler::resetSplitConditions() { _constraintToViolationCount.clear(); _numRejectedPhasePatternProposal = 0; _needToSplit = false; } -void SmtCore::recordImpliedValidSplit( PiecewiseLinearCaseSplit &validSplit ) +void SearchTreeHandler::recordImpliedValidSplit( PiecewiseLinearCaseSplit &validSplit ) { if ( _stack.empty() ) _impliedValidSplitsAtRoot.append( validSplit ); @@ -397,7 +400,7 @@ void SmtCore::recordImpliedValidSplit( PiecewiseLinearCaseSplit &validSplit ) checkSkewFromDebuggingSolution(); } -void SmtCore::allSplitsSoFar( List &result ) const +void SearchTreeHandler::allSplitsSoFar( List &result ) const { result.clear(); @@ -412,19 +415,19 @@ void SmtCore::allSplitsSoFar( List &result ) const } } -void SmtCore::setStatistics( Statistics *statistics ) +void SearchTreeHandler::setStatistics( Statistics *statistics ) { _statistics = statistics; } -void SmtCore::storeDebuggingSolution( const Map &debuggingSolution ) +void SearchTreeHandler::storeDebuggingSolution( const Map &debuggingSolution ) { _debuggingSolution = debuggingSolution; } // Return true if stack is currently compliant, false otherwise // If there is no stored solution, return false --- incompliant. -bool SmtCore::checkSkewFromDebuggingSolution() +bool SearchTreeHandler::checkSkewFromDebuggingSolution() { if ( _debuggingSolution.empty() ) return false; @@ -476,8 +479,8 @@ bool SmtCore::checkSkewFromDebuggingSolution() return true; } -bool SmtCore::splitAllowsStoredSolution( const PiecewiseLinearCaseSplit &split, - String &error ) const +bool SearchTreeHandler::splitAllowsStoredSolution( const PiecewiseLinearCaseSplit &split, + String &error ) const { // False if the split prevents one of the values in the stored solution, true otherwise. 
error = ""; @@ -520,7 +523,7 @@ bool SmtCore::splitAllowsStoredSolution( const PiecewiseLinearCaseSplit &split, return true; } -PiecewiseLinearConstraint *SmtCore::chooseViolatedConstraintForFixing( +PiecewiseLinearConstraint *SearchTreeHandler::chooseViolatedConstraintForFixing( List &_violatedPlConstraints ) const { ASSERT( !_violatedPlConstraints.empty() ); @@ -554,7 +557,7 @@ PiecewiseLinearConstraint *SmtCore::chooseViolatedConstraintForFixing( return candidate; } -void SmtCore::replaySmtStackEntry( SmtStackEntry *stackEntry ) +void SearchTreeHandler::replaySearchTreeStackEntry( SearchTreeStackEntry *stackEntry ) { struct timespec start = TimeUtils::sampleMicro(); @@ -585,22 +588,22 @@ void SmtCore::replaySmtStackEntry( SmtStackEntry *stackEntry ) if ( level > _statistics->getUnsignedAttribute( Statistics::MAX_DECISION_LEVEL ) ) _statistics->setUnsignedAttribute( Statistics::MAX_DECISION_LEVEL, level ); struct timespec end = TimeUtils::sampleMicro(); - _statistics->incLongAttribute( Statistics::TOTAL_TIME_SMT_CORE_MICRO, + _statistics->incLongAttribute( Statistics::TOTAL_TIME_SEARCH_TREE_HANDLER_MICRO, TimeUtils::timePassed( start, end ) ); } } -void SmtCore::storeSmtState( SmtState &smtState ) +void SearchTreeHandler::storeSearchTreeState( SearchTreeState &searchTreeState ) { - smtState._impliedValidSplitsAtRoot = _impliedValidSplitsAtRoot; + searchTreeState._impliedValidSplitsAtRoot = _impliedValidSplitsAtRoot; for ( auto &stackEntry : _stack ) - smtState._stack.append( stackEntry->duplicateSmtStackEntry() ); + searchTreeState._stack.append( stackEntry->duplicateSearchTreeStackEntry() ); - smtState._stateId = _stateId; + searchTreeState._stateId = _stateId; } -bool SmtCore::pickSplitPLConstraint() +bool SearchTreeHandler::pickSplitPLConstraint() { if ( _needToSplit ) { diff --git a/src/engine/SmtCore.h b/src/engine/SearchTreeHandler.h similarity index 79% rename from src/engine/SmtCore.h rename to src/engine/SearchTreeHandler.h index ad1d61f8e9..25f7e56a6e 100644 --- a/src/engine/SmtCore.h +++ b/src/engine/SearchTreeHandler.h @@ -1,5 +1,5 @@ /********************* */ -/*! \file SmtCore.h +/*! \file SearchTreeHandler.h ** \verbatim ** Top contributors (to current version): ** Guy Katz, Parth Shah @@ -13,22 +13,25 @@ **/ -#ifndef __SmtCore_h__ -#define __SmtCore_h__ +#ifndef __SearchTreeHandler_h__ +#define __SearchTreeHandler_h__ #include "DivideStrategy.h" +#include "HashMap.h" #include "PLConstraintScoreTracker.h" #include "PiecewiseLinearCaseSplit.h" #include "PiecewiseLinearConstraint.h" -#include "SmtStackEntry.h" -#include "SmtState.h" +#include "SearchTreeStackEntry.h" +#include "SearchTreeState.h" #include "Stack.h" #include "Statistics.h" +#include "context/cdhashmap.h" #include "context/context.h" #include -#define SMT_LOG( x, ... ) LOG( GlobalConfiguration::SMT_CORE_LOGGING, "SmtCore: %s\n", x ) +#define SEARCH_TREE_LOG( x, ... ) \ + LOG( GlobalConfiguration::SEARCH_TREE_HANDLER_LOGGING, "SearchTreeHandler: %s\n", x ) class EngineState; class IEngine; @@ -36,11 +39,11 @@ class String; using CVC4::context::Context; -class SmtCore +class SearchTreeHandler { public: - SmtCore( IEngine *engine ); - ~SmtCore(); + explicit SearchTreeHandler( IEngine *engine ); + ~SearchTreeHandler(); /* Clear the stack. 
@@ -48,7 +51,7 @@ class SmtCore
    void freeMemory();

    /*
-      Reset the SmtCore
+      Reset the SearchTreeHandler
    */
    void reset();

@@ -58,7 +61,7 @@ class SmtCore
    void initializeScoreTrackerIfNeeded( const List<PiecewiseLinearConstraint *> &plConstraints );

    /*
-      Inform the SMT core that a SoI phase pattern proposal is rejected.
+      Inform the Search Tree handler that a SoI phase pattern proposal is rejected.
    */
    void reportRejectedPhasePatternProposal();

@@ -80,7 +83,7 @@ class SmtCore
    }

    /*
-      Inform the SMT core that a PL constraint is violated.
+      Inform the Search Tree handler that a PL constraint is violated.
    */
    void reportViolatedConstraint( PiecewiseLinearConstraint *constraint );

@@ -97,7 +100,7 @@ class SmtCore
    void resetSplitConditions();

    /*
-      Returns true iff the SMT core wants to perform a case split.
+      Returns true iff the Search Tree handler wants to perform a case split.
    */
    bool needToSplit() const;

@@ -123,30 +126,29 @@ class SmtCore
    */
    void pushContext();

-
    /*
      The current stack depth.
    */
    unsigned getStackDepth() const;

    /*
-      Let the smt core know of an implied valid case split that was discovered.
+      Let the search tree handler know of an implied valid case split that was discovered.
    */
    void recordImpliedValidSplit( PiecewiseLinearCaseSplit &validSplit );

    /*
-      Return a list of all splits performed so far, both SMT-originating and valid ones,
+      Return a list of all splits performed so far, both Search Tree-originating and valid ones,
      in the correct order.
    */
    void allSplitsSoFar( List<PiecewiseLinearCaseSplit> &result ) const;

    /*
-      Have the SMT core start reporting statistics.
+      Have the Search Tree handler start reporting statistics.
    */
    void setStatistics( Statistics *statistics );

    /*
-      Have the SMT core choose, among a set of violated PL constraints, which
+      Have the Search Tree handler choose, among a set of violated PL constraints, which
      constraint should be repaired (without splitting)
    */
    PiecewiseLinearConstraint *chooseViolatedConstraintForFixing(
@@ -160,12 +162,12 @@ class SmtCore
    /*
      Replay a stackEntry
    */
-    void replaySmtStackEntry( SmtStackEntry *stackEntry );
+    void replaySearchTreeStackEntry( SearchTreeStackEntry *stackEntry );

    /*
-      Store the current state of the SmtCore into smtState
+      Store the current state of the SearchTreeHandler into searchTreeState
    */
-    void storeSmtState( SmtState &smtState );
+    void storeSearchTreeState( SearchTreeState &searchTreeState );

    /*
      Pick the piecewise linear constraint for splitting, returns true
@@ -194,7 +196,7 @@ class SmtCore
    /*
      The case-split stack.
    */
-    List<SmtStackEntry *> _stack;
+    List<SearchTreeStackEntry *> _stack;

    /*
      The engine.
@@ -205,6 +207,7 @@ class SmtCore
      Context for synchronizing the search.
    */
    Context &_context;
+
    /*
      Do we need to perform a split and on which constraint.
    */
@@ -247,7 +250,7 @@ class SmtCore
    /*
      Heap to store the scores of each PLConstraint.
    */
-    std::unique_ptr<PLConstraintScoreTracker> _scoreTracker;
+    std::shared_ptr<PLConstraintScoreTracker> _scoreTracker;

    /*
      Number of times the phase pattern proposal has been rejected at the
@@ -256,7 +259,7 @@ class SmtCore
    unsigned _numRejectedPhasePatternProposal;
};

-#endif // __SmtCore_h__
+#endif // __SearchTreeHandler_h__

//
// Local Variables:
diff --git a/src/engine/SmtStackEntry.h b/src/engine/SearchTreeStackEntry.h
similarity index 80%
rename from src/engine/SmtStackEntry.h
rename to src/engine/SearchTreeStackEntry.h
index ec9b3f2ce2..a701d9ffd3 100644
--- a/src/engine/SmtStackEntry.h
+++ b/src/engine/SearchTreeStackEntry.h
@@ -1,5 +1,5 @@
/********************* */
-/*! \file SmtStackEntry.h
+/*!
\file SearchTreeStackEntry.h ** \verbatim ** Top contributors (to current version): ** Guy Katz, Haoze Wu @@ -13,8 +13,8 @@ **/ -#ifndef __SmtStackEntry_h__ -#define __SmtStackEntry_h__ +#ifndef __SearchTreeStackEntry_h__ +#define __SearchTreeStackEntry_h__ #include "EngineState.h" #include "PiecewiseLinearCaseSplit.h" @@ -24,7 +24,7 @@ the active split, the alternative splits (in case of backtrack), and also any implied splits that were discovered subsequently. */ -struct SmtStackEntry +struct SearchTreeStackEntry { public: PiecewiseLinearCaseSplit _activeSplit; @@ -33,14 +33,14 @@ struct SmtStackEntry EngineState *_engineState; /* - Create a copy of the SmtStackEntry on the stack and returns a pointer to + Create a copy of the SearchTreeStackEntry on the stack and returns a pointer to the copy. We do not copy the engineState for now, since where this method is called, we recreate the engineState by replaying the caseSplits. */ - SmtStackEntry *duplicateSmtStackEntry() + SearchTreeStackEntry *duplicateSearchTreeStackEntry() { - SmtStackEntry *copy = new SmtStackEntry(); + SearchTreeStackEntry *copy = new SearchTreeStackEntry(); copy->_activeSplit = _activeSplit; copy->_impliedValidSplits = _impliedValidSplits; @@ -51,7 +51,7 @@ struct SmtStackEntry } }; -#endif // __SmtStackEntry_h__ +#endif // __SearchTreeStackEntry_h__ // // Local Variables: diff --git a/src/engine/SmtState.h b/src/engine/SearchTreeState.h similarity index 82% rename from src/engine/SmtState.h rename to src/engine/SearchTreeState.h index d513b67b34..23d0353128 100644 --- a/src/engine/SmtState.h +++ b/src/engine/SearchTreeState.h @@ -1,5 +1,5 @@ /********************* */ -/*! \file SmtState.h +/*! \file SearchTreeState.h ** \verbatim ** Top contributors (to current version): ** Guy Katz, Duligur Ibeling @@ -11,17 +11,17 @@ ** ** [[ Add lengthier description here ]] - **/ +**/ -#ifndef __SmtState_h__ -#define __SmtState_h__ +#ifndef __SearchTreeState_h__ +#define __SearchTreeState_h__ #include "List.h" #include "Map.h" #include "PiecewiseLinearConstraint.h" -#include "SmtStackEntry.h" +#include "SearchTreeStackEntry.h" -class SmtState +class SearchTreeState { public: /* @@ -32,7 +32,7 @@ class SmtState /* The stack. 
*/ - List _stack; + List _stack; /* A unique ID allocated to every state that is stored, for @@ -41,7 +41,7 @@ class SmtState unsigned _stateId; }; -#endif // __SmtState_h__ +#endif // __SearchTreeState_h__ // // Local Variables: diff --git a/src/engine/SignConstraint.cpp b/src/engine/SignConstraint.cpp index 744da2da94..fea9546bc2 100644 --- a/src/engine/SignConstraint.cpp +++ b/src/engine/SignConstraint.cpp @@ -23,7 +23,6 @@ #include "MarabouError.h" #include "PiecewiseLinearCaseSplit.h" #include "Query.h" -#include "SignConstraint.h" #include "Statistics.h" #ifdef _WIN32 @@ -123,7 +122,7 @@ bool SignConstraint::satisfied() const List SignConstraint::getCaseSplits() const { - if ( _phaseStatus != PHASE_NOT_FIXED ) + if ( getPhaseStatus() != PHASE_NOT_FIXED ) throw MarabouError( MarabouError::REQUESTED_CASE_SPLITS_FROM_FIXED_CONSTRAINT ); List splits; @@ -168,7 +167,7 @@ List SignConstraint::getCaseSplits() const List SignConstraint::getAllCases() const { - if ( _phaseStatus != PHASE_NOT_FIXED ) + if ( getPhaseStatus() != PHASE_NOT_FIXED ) throw MarabouError( MarabouError::REQUESTED_CASE_SPLITS_FROM_FIXED_CONSTRAINT ); if ( _direction == SIGN_PHASE_NEGATIVE ) @@ -220,7 +219,7 @@ PiecewiseLinearCaseSplit SignConstraint::getPositiveSplit() const bool SignConstraint::phaseFixed() const { - return _phaseStatus != PHASE_NOT_FIXED; + return getPhaseStatus() != PHASE_NOT_FIXED; } void SignConstraint::addAuxiliaryEquationsAfterPreprocessing( Query &inputQuery ) @@ -276,9 +275,9 @@ void SignConstraint::addAuxiliaryEquationsAfterPreprocessing( Query &inputQuery PiecewiseLinearCaseSplit SignConstraint::getImpliedCaseSplit() const { - ASSERT( _phaseStatus != PHASE_NOT_FIXED ); + ASSERT( getPhaseStatus() != PHASE_NOT_FIXED ); - if ( _phaseStatus == PhaseStatus::SIGN_PHASE_POSITIVE ) + if ( getPhaseStatus() == PhaseStatus::SIGN_PHASE_POSITIVE ) return getPositiveSplit(); return getNegativeSplit(); @@ -403,9 +402,9 @@ void SignConstraint::notifyLowerBound( unsigned variable, double bound ) throw InfeasibleQueryException(); _boundManager->addLemmaExplanationAndTightenBound( - _f, 1, Tightening::LB, { variable }, Tightening::LB, getType() ); + _f, 1, Tightening::LB, { variable }, Tightening::LB, *this, true, bound ); _boundManager->addLemmaExplanationAndTightenBound( - _b, 0, Tightening::LB, { variable }, Tightening::LB, getType() ); + _b, 0, Tightening::LB, { variable }, Tightening::LB, *this, false, bound ); } else { @@ -421,7 +420,7 @@ void SignConstraint::notifyLowerBound( unsigned variable, double bound ) { if ( _boundManager->shouldProduceProofs() ) _boundManager->addLemmaExplanationAndTightenBound( - _f, 1, Tightening::LB, { variable }, Tightening::LB, getType() ); + _f, 1, Tightening::LB, { variable }, Tightening::LB, *this, true, bound ); else _boundManager->tightenLowerBound( _f, 1 ); } @@ -453,9 +452,9 @@ void SignConstraint::notifyUpperBound( unsigned variable, double bound ) throw InfeasibleQueryException(); _boundManager->addLemmaExplanationAndTightenBound( - _f, -1, Tightening::UB, { variable }, Tightening::UB, getType() ); + _f, -1, Tightening::UB, { variable }, Tightening::UB, *this, true, bound ); _boundManager->addLemmaExplanationAndTightenBound( - _b, 0, Tightening::UB, { variable }, Tightening::UB, getType() ); + _b, 0, Tightening::UB, { variable }, Tightening::UB, *this, false, bound ); } else { @@ -471,7 +470,7 @@ void SignConstraint::notifyUpperBound( unsigned variable, double bound ) { if ( _boundManager->shouldProduceProofs() ) _boundManager->addLemmaExplanationAndTightenBound( - 
_f, -1, Tightening::UB, { variable }, Tightening::UB, getType() );
+                _f, -1, Tightening::UB, { variable }, Tightening::UB, *this, true, bound );
            else
                _boundManager->tightenUpperBound( _f, -1 );
        }
@@ -572,22 +571,22 @@ void SignConstraint::eliminateVariable( __attribute__( ( unused ) ) unsigned var

        if ( FloatUtils::areEqual( fixedValue, 1 ) )
        {
-            ASSERT( _phaseStatus != SIGN_PHASE_NEGATIVE );
+            ASSERT( getPhaseStatus() != SIGN_PHASE_NEGATIVE );
        }
        else if ( FloatUtils::areEqual( fixedValue, -1 ) )
        {
-            ASSERT( _phaseStatus != SIGN_PHASE_POSITIVE );
+            ASSERT( getPhaseStatus() != SIGN_PHASE_POSITIVE );
        }
    }
    else if ( variable == _b )
    {
        if ( FloatUtils::gte( fixedValue, 0 ) )
        {
-            ASSERT( _phaseStatus != SIGN_PHASE_NEGATIVE );
+            ASSERT( getPhaseStatus() != SIGN_PHASE_NEGATIVE );
        }
        else if ( FloatUtils::lt( fixedValue, 0 ) )
        {
-            ASSERT( _phaseStatus != SIGN_PHASE_POSITIVE );
+            ASSERT( getPhaseStatus() != SIGN_PHASE_POSITIVE );
        }
    }
} );
@@ -612,8 +611,8 @@ void SignConstraint::dump( String &output ) const
                      _f,
                      _b,
                      _constraintActive ? "Yes" : "No",
-                      _phaseStatus,
-                      phaseToString( _phaseStatus ).ascii() );
+                      getPhaseStatus(),
+                      phaseToString( getPhaseStatus() ).ascii() );

    output += Stringf( "b in [%s, %s], ",
diff --git a/src/engine/SubQuery.h b/src/engine/SubQuery.h
index 7f4a12940b..c8239b5869 100644
--- a/src/engine/SubQuery.h
+++ b/src/engine/SubQuery.h
@@ -19,7 +19,7 @@
 #include "List.h"
 #include "MString.h"
 #include "PiecewiseLinearCaseSplit.h"
-#include "SmtState.h"
+#include "SearchTreeState.h"

 #include <memory>
 #include

@@ -33,7 +33,7 @@ struct SubQuery
    String _queryId;
    std::unique_ptr<PiecewiseLinearCaseSplit> _split;
-    std::unique_ptr<SmtState> _smtState;
+    std::unique_ptr<SearchTreeState> _searchTreeState;
    unsigned _timeoutInSeconds;
    unsigned _depth;
};
diff --git a/src/engine/Tableau.cpp b/src/engine/Tableau.cpp
index 21fe242875..70ea584545 100644
--- a/src/engine/Tableau.cpp
+++ b/src/engine/Tableau.cpp
@@ -2538,7 +2538,28 @@ void Tableau::getColumnOfBasis( unsigned column, SparseUnsortedList *result ) const

 void Tableau::refreshBasisFactorization()
 {
-    _basisFactorization->obtainFreshBasis();
+    try
+    {
+        _basisFactorization->obtainFreshBasis();
+    }
+    catch ( const MalformedBasisException & )
+    {
+        ConstraintMatrixAnalyzer analyzer;
+        analyzer.analyze( (const SparseUnsortedList **)_sparseRowsOfA, _m, _n );
+        List<unsigned> independentColumns = analyzer.getIndependentColumns();
+
+        try
+        {
+            initializeTableau( independentColumns );
+        }
+        catch ( const MalformedBasisException & )
+        {
+            TABLEAU_LOG( "refreshBasisFactorization failed - could not refactorize basis" );
+            throw MarabouError( MarabouError::FAILURE_TO_ADD_NEW_EQUATION );
+        }
+
+        computeCostFunction();
+    }
 }

 unsigned Tableau::getVariableAfterMerging( unsigned variable ) const
diff --git a/src/engine/tests/MockBoundManager.h b/src/engine/tests/MockBoundManager.h
index 324ccad18e..584ad490c3 100644
--- a/src/engine/tests/MockBoundManager.h
+++ b/src/engine/tests/MockBoundManager.h
@@ -268,7 +268,9 @@ class MockBoundManager : public IBoundManager
                      Tightening::BoundType /* affectedVarBound */,
                      const List<unsigned> & /* causingVar */,
                      Tightening::BoundType /* causingVarBound */,
-                      PiecewiseLinearFunctionType /* constraintType */ )
+                      PiecewiseLinearConstraint & /* constraint */,
+                      bool /* isPhaseFixing */,
+                      double /* minTargetBound */ )
    {
        return true;
    }
diff --git a/src/engine/tests/MockEngine.h b/src/engine/tests/MockEngine.h
index 4bab581301..93c9daec1a 100644
--- a/src/engine/tests/MockEngine.h
+++ b/src/engine/tests/MockEngine.h
@@ -22,6 +22,8 @@
 #include "PiecewiseLinearConstraint.h"
 #include "context/context.h"
+#include + class String; class MockEngine : public IEngine @@ -93,6 +95,7 @@ class MockEngine : public IEngine } void postContextPopHook(){}; + void preContextPushHook(){}; mutable EngineState *lastStoredState; @@ -154,17 +157,17 @@ class MockEngine : public IEngine { } - mutable SmtState *lastRestoredSmtState; - bool restoreSmtState( SmtState &smtState ) + mutable SearchTreeState *lastRestoredSearchTreeState; + bool restoreSearchTreeState( SearchTreeState &searchTreeState ) { - lastRestoredSmtState = &smtState; + lastRestoredSearchTreeState = &searchTreeState; return true; } - mutable SmtState *lastStoredSmtState; - void storeSmtState( SmtState &smtState ) + mutable SearchTreeState *lastStoredSearchTreeState; + void storeSearchTreeState( SearchTreeState &searchTreeState ) { - lastStoredSmtState = &smtState; + lastStoredSearchTreeState = &searchTreeState; } List _constraintsToSplit; @@ -233,13 +236,9 @@ class MockEngine : public IEngine return 0.0; } - void updateGroundUpperBound( unsigned /* var */, double /* value */ ) - { - } + void updateGroundUpperBound( unsigned /* var */, double /* value */ ){}; - void updateGroundLowerBound( unsigned /*var*/, double /*value*/ ) - { - } + void updateGroundLowerBound( unsigned /*var*/, double /*value*/ ){}; double getGroundBound( unsigned /*var*/, bool /*isUpper*/ ) const { @@ -251,9 +250,7 @@ class MockEngine : public IEngine return NULL; } - void setUNSATCertificateCurrentPointer( UnsatCertificateNode * /* node*/ ) - { - } + void setUNSATCertificateCurrentPointer( UnsatCertificateNode * /* node*/ ){}; const UnsatCertificateNode *getUNSATCertificateRoot() const { @@ -265,18 +262,14 @@ class MockEngine : public IEngine return true; } - void explainSimplexFailure() - { - } + void explainSimplexFailure(){}; const BoundExplainer *getBoundExplainer() const { return NULL; } - void setBoundExplainerContent( BoundExplainer * /*boundExplainer */ ) - { - } + void setBoundExplainerContent( BoundExplainer * /*boundExplainer */ ){}; void propagateBoundManagerTightenings() { @@ -287,9 +280,12 @@ class MockEngine : public IEngine return true; } - void addPLCLemma( std::shared_ptr & /*explanation*/ ) + const List *getPiecewiseLinearConstraints() const { + return NULL; } + + void incNumOfLemmas(){}; }; #endif // __MockEngine_h__ diff --git a/src/engine/tests/Test_SmtCore.h b/src/engine/tests/Test_SearchTreeHandler.h similarity index 75% rename from src/engine/tests/Test_SmtCore.h rename to src/engine/tests/Test_SearchTreeHandler.h index eae4dea4c0..a0380aba25 100644 --- a/src/engine/tests/Test_SmtCore.h +++ b/src/engine/tests/Test_SearchTreeHandler.h @@ -1,5 +1,5 @@ /********************* */ -/*! \file Test_SmtCore.h +/*! 
\file Test_SearchTreeHandler.h ** \verbatim ** Top contributors (to current version): ** Guy Katz, Duligur Ibeling, Derek Huang @@ -11,7 +11,7 @@ ** ** [[ Add lengthier description here ]] - **/ +**/ #include "MockEngine.h" #include "MockErrno.h" @@ -19,20 +19,20 @@ #include "PiecewiseLinearConstraint.h" #include "Query.h" #include "ReluConstraint.h" -#include "SmtCore.h" +#include "SearchTreeHandler.h" #include #include -class MockForSmtCore +class MockForSearchTreeHandler { public: }; -class SmtCoreTestSuite : public CxxTest::TestSuite +class SearchTreeHandlerTestSuite : public CxxTest::TestSuite { public: - MockForSmtCore *mock; + MockForSearchTreeHandler *mock; MockEngine *engine; class MockConstraint : public PiecewiseLinearConstraint @@ -180,7 +180,7 @@ class SmtCoreTestSuite : public CxxTest::TestSuite void setUp() { - TS_ASSERT( mock = new MockForSmtCore ); + TS_ASSERT( mock = new MockForSearchTreeHandler ); TS_ASSERT( engine = new MockEngine ); } @@ -195,25 +195,25 @@ class SmtCoreTestSuite : public CxxTest::TestSuite ReluConstraint constraint1( 1, 2 ); ReluConstraint constraint2( 3, 4 ); - SmtCore smtCore( engine ); + SearchTreeHandler searchTreeHandler( engine ); for ( unsigned i = 0; i < (unsigned)Options::get()->getInt( Options::CONSTRAINT_VIOLATION_THRESHOLD ) - 1; ++i ) { - smtCore.reportViolatedConstraint( &constraint1 ); - TS_ASSERT( !smtCore.needToSplit() ); - smtCore.reportViolatedConstraint( &constraint2 ); - TS_ASSERT( !smtCore.needToSplit() ); + searchTreeHandler.reportViolatedConstraint( &constraint1 ); + TS_ASSERT( !searchTreeHandler.needToSplit() ); + searchTreeHandler.reportViolatedConstraint( &constraint2 ); + TS_ASSERT( !searchTreeHandler.needToSplit() ); } - smtCore.reportViolatedConstraint( &constraint2 ); - TS_ASSERT( smtCore.needToSplit() ); + searchTreeHandler.reportViolatedConstraint( &constraint2 ); + TS_ASSERT( searchTreeHandler.needToSplit() ); } void test_perform_split() { - SmtCore smtCore( engine ); + SearchTreeHandler searchTreeHandler( engine ); MockConstraint constraint; @@ -247,18 +247,18 @@ class SmtCoreTestSuite : public CxxTest::TestSuite for ( unsigned i = 0; i < (unsigned)Options::get()->getInt( Options::CONSTRAINT_VIOLATION_THRESHOLD ); ++i ) - smtCore.reportViolatedConstraint( &constraint ); + searchTreeHandler.reportViolatedConstraint( &constraint ); engine->lastStoredState = NULL; engine->lastRestoredState = NULL; - TS_ASSERT( smtCore.needToSplit() ); - TS_ASSERT_EQUALS( smtCore.getStackDepth(), 0U ); + TS_ASSERT( searchTreeHandler.needToSplit() ); + TS_ASSERT_EQUALS( searchTreeHandler.getStackDepth(), 0U ); TS_ASSERT( !constraint.setActiveWasCalled ); - TS_ASSERT_THROWS_NOTHING( smtCore.performSplit() ); + TS_ASSERT_THROWS_NOTHING( searchTreeHandler.performSplit() ); TS_ASSERT( constraint.setActiveWasCalled ); - TS_ASSERT( !smtCore.needToSplit() ); - TS_ASSERT_EQUALS( smtCore.getStackDepth(), 1U ); + TS_ASSERT( !searchTreeHandler.needToSplit() ); + TS_ASSERT_EQUALS( searchTreeHandler.getStackDepth(), 1U ); // Check that Split1 was performed and tableau state was stored TS_ASSERT_EQUALS( engine->lastLowerBounds.size(), 1U ); @@ -280,8 +280,8 @@ class SmtCoreTestSuite : public CxxTest::TestSuite // Pop Split1, check that the tableau was restored and that // a Split2 was performed - TS_ASSERT( smtCore.popSplit() ); - TS_ASSERT_EQUALS( smtCore.getStackDepth(), 1U ); + TS_ASSERT( searchTreeHandler.popSplit() ); + TS_ASSERT_EQUALS( searchTreeHandler.getStackDepth(), 1U ); TS_ASSERT_EQUALS( engine->lastRestoredState, originalState ); 
TS_ASSERT( !engine->lastStoredState ); @@ -304,8 +304,8 @@ class SmtCoreTestSuite : public CxxTest::TestSuite // Pop Split2, check that the tableau was restored and that // a Split3 was performed - TS_ASSERT( smtCore.popSplit() ); - TS_ASSERT_EQUALS( smtCore.getStackDepth(), 1U ); + TS_ASSERT( searchTreeHandler.popSplit() ); + TS_ASSERT_EQUALS( searchTreeHandler.getStackDepth(), 1U ); TS_ASSERT_EQUALS( engine->lastRestoredState, originalState ); TS_ASSERT( !engine->lastStoredState ); @@ -324,14 +324,14 @@ class SmtCoreTestSuite : public CxxTest::TestSuite engine->lastEquations.clear(); // Final pop - TS_ASSERT( !smtCore.popSplit() ); + TS_ASSERT( !searchTreeHandler.popSplit() ); TS_ASSERT( !engine->lastRestoredState ); - TS_ASSERT_EQUALS( smtCore.getStackDepth(), 0U ); + TS_ASSERT_EQUALS( searchTreeHandler.getStackDepth(), 0U ); } void test_perform_split__inactive_constraint() { - SmtCore smtCore( engine ); + SearchTreeHandler searchTreeHandler( engine ); MockConstraint constraint; @@ -365,13 +365,13 @@ class SmtCoreTestSuite : public CxxTest::TestSuite for ( unsigned i = 0; i < (unsigned)Options::get()->getInt( Options::CONSTRAINT_VIOLATION_THRESHOLD ); ++i ) - smtCore.reportViolatedConstraint( &constraint ); + searchTreeHandler.reportViolatedConstraint( &constraint ); constraint.nextIsActive = false; - TS_ASSERT( smtCore.needToSplit() ); - TS_ASSERT_THROWS_NOTHING( smtCore.performSplit() ); - TS_ASSERT( !smtCore.needToSplit() ); + TS_ASSERT( searchTreeHandler.needToSplit() ); + TS_ASSERT_THROWS_NOTHING( searchTreeHandler.performSplit() ); + TS_ASSERT( !searchTreeHandler.needToSplit() ); // Check that no split was performed @@ -383,7 +383,7 @@ class SmtCoreTestSuite : public CxxTest::TestSuite void test_all_splits_so_far() { - SmtCore smtCore( engine ); + SearchTreeHandler searchTreeHandler( engine ); MockConstraint constraint; @@ -410,13 +410,13 @@ class SmtCoreTestSuite : public CxxTest::TestSuite for ( unsigned i = 0; i < (unsigned)Options::get()->getInt( Options::CONSTRAINT_VIOLATION_THRESHOLD ); ++i ) - smtCore.reportViolatedConstraint( &constraint ); + searchTreeHandler.reportViolatedConstraint( &constraint ); constraint.nextIsActive = true; - TS_ASSERT( smtCore.needToSplit() ); - TS_ASSERT_THROWS_NOTHING( smtCore.performSplit() ); - TS_ASSERT( !smtCore.needToSplit() ); + TS_ASSERT( searchTreeHandler.needToSplit() ); + TS_ASSERT_THROWS_NOTHING( searchTreeHandler.performSplit() ); + TS_ASSERT( !searchTreeHandler.needToSplit() ); // Register a valid split @@ -424,7 +424,7 @@ class SmtCoreTestSuite : public CxxTest::TestSuite PiecewiseLinearCaseSplit split3; Tightening bound5( 14, 2.3, Tightening::LB ); - TS_ASSERT_THROWS_NOTHING( smtCore.recordImpliedValidSplit( split3 ) ); + TS_ASSERT_THROWS_NOTHING( searchTreeHandler.recordImpliedValidSplit( split3 ) ); // Do another real split @@ -445,17 +445,17 @@ class SmtCoreTestSuite : public CxxTest::TestSuite for ( unsigned i = 0; i < (unsigned)Options::get()->getInt( Options::CONSTRAINT_VIOLATION_THRESHOLD ); ++i ) - smtCore.reportViolatedConstraint( &constraint2 ); + searchTreeHandler.reportViolatedConstraint( &constraint2 ); constraint2.nextIsActive = true; - TS_ASSERT( smtCore.needToSplit() ); - TS_ASSERT_THROWS_NOTHING( smtCore.performSplit() ); - TS_ASSERT( !smtCore.needToSplit() ); + TS_ASSERT( searchTreeHandler.needToSplit() ); + TS_ASSERT_THROWS_NOTHING( searchTreeHandler.performSplit() ); + TS_ASSERT( !searchTreeHandler.needToSplit() ); // Check that everything is received in the correct order List allSplitsSoFar; - 
TS_ASSERT_THROWS_NOTHING( smtCore.allSplitsSoFar( allSplitsSoFar ) ); + TS_ASSERT_THROWS_NOTHING( searchTreeHandler.allSplitsSoFar( allSplitsSoFar ) ); TS_ASSERT_EQUALS( allSplitsSoFar.size(), 3U ); @@ -469,7 +469,7 @@ class SmtCoreTestSuite : public CxxTest::TestSuite TS_ASSERT_EQUALS( *it, split4 ); } - void test_store_smt_state() + void test_store_search_tree_state() { // ReLU(x0, x1) // ReLU(x2, x3) @@ -484,89 +484,89 @@ class SmtCoreTestSuite : public CxxTest::TestSuite relu1.transformToUseAuxVariables( inputQuery ); relu2.transformToUseAuxVariables( inputQuery ); - SmtCore smtCore( engine ); + SearchTreeHandler searchTreeHandler( engine ); PiecewiseLinearCaseSplit split1; Tightening bound1( 1, 0.5, Tightening::LB ); split1.storeBoundTightening( bound1 ); - TS_ASSERT_THROWS_NOTHING( smtCore.recordImpliedValidSplit( split1 ) ); + TS_ASSERT_THROWS_NOTHING( searchTreeHandler.recordImpliedValidSplit( split1 ) ); for ( unsigned i = 0; i < (unsigned)Options::get()->getInt( Options::CONSTRAINT_VIOLATION_THRESHOLD ); ++i ) - smtCore.reportViolatedConstraint( &relu1 ); + searchTreeHandler.reportViolatedConstraint( &relu1 ); - TS_ASSERT( smtCore.needToSplit() ); - TS_ASSERT_THROWS_NOTHING( smtCore.performSplit() ); - TS_ASSERT( !smtCore.needToSplit() ); + TS_ASSERT( searchTreeHandler.needToSplit() ); + TS_ASSERT_THROWS_NOTHING( searchTreeHandler.performSplit() ); + TS_ASSERT( !searchTreeHandler.needToSplit() ); PiecewiseLinearCaseSplit split2; Tightening bound2( 3, 2.3, Tightening::UB ); split2.storeBoundTightening( bound2 ); - TS_ASSERT_THROWS_NOTHING( smtCore.recordImpliedValidSplit( split2 ) ); + TS_ASSERT_THROWS_NOTHING( searchTreeHandler.recordImpliedValidSplit( split2 ) ); for ( unsigned i = 0; i < (unsigned)Options::get()->getInt( Options::CONSTRAINT_VIOLATION_THRESHOLD ); ++i ) - smtCore.reportViolatedConstraint( &relu2 ); + searchTreeHandler.reportViolatedConstraint( &relu2 ); - TS_ASSERT( smtCore.needToSplit() ); - TS_ASSERT_THROWS_NOTHING( smtCore.performSplit() ); - TS_ASSERT( !smtCore.needToSplit() ); + TS_ASSERT( searchTreeHandler.needToSplit() ); + TS_ASSERT_THROWS_NOTHING( searchTreeHandler.performSplit() ); + TS_ASSERT( !searchTreeHandler.needToSplit() ); - SmtState smtState; - smtCore.storeSmtState( smtState ); - TS_ASSERT( smtState._impliedValidSplitsAtRoot.size() == 1 ); - TS_ASSERT( *smtState._impliedValidSplitsAtRoot.begin() == split1 ); + SearchTreeState searchTreeState; + searchTreeHandler.storeSearchTreeState( searchTreeState ); + TS_ASSERT( searchTreeState._impliedValidSplitsAtRoot.size() == 1 ); + TS_ASSERT( *searchTreeState._impliedValidSplitsAtRoot.begin() == split1 ); - TS_ASSERT( smtState._stack.size() == 2 ); + TS_ASSERT( searchTreeState._stack.size() == 2 ); // Examine the first stackEntry - SmtStackEntry *stackEntry = *( smtState._stack.begin() ); + SearchTreeStackEntry *stackEntry = *( searchTreeState._stack.begin() ); TS_ASSERT( stackEntry->_activeSplit == *( relu1.getCaseSplits().begin() ) ); TS_ASSERT( *( stackEntry->_alternativeSplits.begin() ) == *( ++relu1.getCaseSplits().begin() ) ); TS_ASSERT( stackEntry->_impliedValidSplits.size() == 1 ); TS_ASSERT( *( stackEntry->_impliedValidSplits.begin() ) == split2 ); // Examine the second stackEntry - stackEntry = *( ++smtState._stack.begin() ); + stackEntry = *( ++searchTreeState._stack.begin() ); TS_ASSERT( stackEntry->_activeSplit == *( relu2.getCaseSplits().begin() ) ); TS_ASSERT( *( stackEntry->_alternativeSplits.begin() ) == *( ++relu2.getCaseSplits().begin() ) ); TS_ASSERT( 
stackEntry->_impliedValidSplits.size() == 0 ); - clearSmtState( smtState ); + clearSearchTreeState( searchTreeState ); - TS_ASSERT_THROWS_NOTHING( smtCore.popSplit() ); + TS_ASSERT_THROWS_NOTHING( searchTreeHandler.popSplit() ); - smtCore.storeSmtState( smtState ); - TS_ASSERT( smtState._impliedValidSplitsAtRoot.size() == 1 ); - TS_ASSERT( *smtState._impliedValidSplitsAtRoot.begin() == split1 ); + searchTreeHandler.storeSearchTreeState( searchTreeState ); + TS_ASSERT( searchTreeState._impliedValidSplitsAtRoot.size() == 1 ); + TS_ASSERT( *searchTreeState._impliedValidSplitsAtRoot.begin() == split1 ); - TS_ASSERT( smtState._stack.size() == 2 ); + TS_ASSERT( searchTreeState._stack.size() == 2 ); // Examine the first stackEntry - stackEntry = *( smtState._stack.begin() ); + stackEntry = *( searchTreeState._stack.begin() ); TS_ASSERT( stackEntry->_activeSplit == *( relu1.getCaseSplits().begin() ) ); TS_ASSERT( *( stackEntry->_alternativeSplits.begin() ) == *( ++relu1.getCaseSplits().begin() ) ); TS_ASSERT( stackEntry->_impliedValidSplits.size() == 1 ); TS_ASSERT( *( stackEntry->_impliedValidSplits.begin() ) == split2 ); // Examine the second stackEntry - stackEntry = *( ++smtState._stack.begin() ); + stackEntry = *( ++searchTreeState._stack.begin() ); TS_ASSERT( stackEntry->_activeSplit == *( ++relu2.getCaseSplits().begin() ) ); TS_ASSERT( stackEntry->_alternativeSplits.empty() ); TS_ASSERT( stackEntry->_impliedValidSplits.size() == 0 ); - clearSmtState( smtState ); + clearSearchTreeState( searchTreeState ); - TS_ASSERT_THROWS_NOTHING( smtCore.popSplit() ); + TS_ASSERT_THROWS_NOTHING( searchTreeHandler.popSplit() ); } - void clearSmtState( SmtState &smtState ) + void clearSearchTreeState( SearchTreeState &searchTreeState ) { - for ( const auto &stackEntry : smtState._stack ) + for ( const auto &stackEntry : searchTreeState._stack ) delete stackEntry; - smtState._stack = List(); - smtState._impliedValidSplitsAtRoot = List(); + searchTreeState._stack = List(); + searchTreeState._impliedValidSplitsAtRoot = List(); } void test_todo() diff --git a/src/nlr/CMakeLists.txt b/src/nlr/CMakeLists.txt index ef89c9492d..89d353aa26 100644 --- a/src/nlr/CMakeLists.txt +++ b/src/nlr/CMakeLists.txt @@ -18,10 +18,10 @@ network_level_reasoner_add_unit_test(DeepPolyAnalysis) network_level_reasoner_add_unit_test(NetworkLevelReasoner) network_level_reasoner_add_unit_test(WsLayerElimination) network_level_reasoner_add_unit_test(ParallelSolver) -network_level_reasoner_add_unit_test(PMNRSelection) if (${ENABLE_GUROBI}) network_level_reasoner_add_unit_test(LPRelaxation) + network_level_reasoner_add_unit_test(PMNRSelection) network_level_reasoner_add_unit_test(PMNR) endif() diff --git a/src/nlr/DeepPolyAbsoluteValueElement.cpp b/src/nlr/DeepPolyAbsoluteValueElement.cpp index 7d26b37354..f2b7274114 100644 --- a/src/nlr/DeepPolyAbsoluteValueElement.cpp +++ b/src/nlr/DeepPolyAbsoluteValueElement.cpp @@ -107,9 +107,8 @@ void DeepPolyAbsoluteValueElement::storePredecessorSymbolicBounds() { for ( unsigned i = 0; i < _size; ++i ) { - NeuronIndex sourceIndex = *( _layer->getActivationSources( i ).begin() ); - ( *_predecessorSymbolicLb )[_layerIndex][_size * sourceIndex._neuron + i] = _symbolicLb[i]; - ( *_predecessorSymbolicUb )[_layerIndex][_size * sourceIndex._neuron + i] = _symbolicUb[i]; + ( *_predecessorSymbolicLb )[_layerIndex][i] = _symbolicLb[i]; + ( *_predecessorSymbolicUb )[_layerIndex][i] = _symbolicUb[i]; ( *_predecessorSymbolicLowerBias )[_layerIndex][i] = _symbolicLowerBias[i]; ( *_predecessorSymbolicUpperBias 
)[_layerIndex][i] = _symbolicUpperBias[i]; } diff --git a/src/nlr/DeepPolyBilinearElement.cpp b/src/nlr/DeepPolyBilinearElement.cpp index a937be7012..b1c8b5f80f 100644 --- a/src/nlr/DeepPolyBilinearElement.cpp +++ b/src/nlr/DeepPolyBilinearElement.cpp @@ -120,7 +120,7 @@ void DeepPolyBilinearElement::execute( // Upper bound: out <= aUpper * x + bUpper * y + c_u, where // aUpper = alpha2 * u_y + ( 1 - alpha2 ) * l_y // bUpper = alpha2 * l_x + ( 1 - alpha2 ) * u_x - // c_u = -alpha2 * -l_x * u_y - ( 1 - alpha2 ) * u_x * l_y + // c_u = -alpha2 * l_x * u_y - ( 1 - alpha2 ) * u_x * l_y _symbolicLbA[i] = coeffs[0] * sourceLbs[1] + ( 1 - coeffs[0] ) * sourceUbs[1]; _symbolicUbA[i] = coeffs[1] * sourceUbs[1] + ( 1 - coeffs[1] ) * sourceLbs[1]; @@ -154,15 +154,10 @@ void DeepPolyBilinearElement::storePredecessorSymbolicBounds() { for ( unsigned i = 0; i < _size; ++i ) { - List sources = _layer->getActivationSources( i ); - ( *_predecessorSymbolicLb )[_layerIndex][_size * sources.begin()->_neuron + i] = - _symbolicLbA[i]; - ( *_predecessorSymbolicUb )[_layerIndex][_size * sources.begin()->_neuron + i] = - _symbolicUbA[i]; - ( *_predecessorSymbolicLb )[_layerIndex][_size * sources.back()._neuron + i] = - _symbolicLbB[i]; - ( *_predecessorSymbolicUb )[_layerIndex][_size * sources.back()._neuron + i] = - _symbolicUbB[i]; + ( *_predecessorSymbolicLb )[_layerIndex][i] = _symbolicLbA[i]; + ( *_predecessorSymbolicUb )[_layerIndex][i] = _symbolicUbA[i]; + ( *_predecessorSymbolicLb )[_layerIndex][_size + i] = _symbolicLbB[i]; + ( *_predecessorSymbolicUb )[_layerIndex][_size + i] = _symbolicUbB[i]; ( *_predecessorSymbolicLowerBias )[_layerIndex][i] = _symbolicLowerBias[i]; ( *_predecessorSymbolicUpperBias )[_layerIndex][i] = _symbolicUpperBias[i]; } diff --git a/src/nlr/DeepPolyElement.cpp b/src/nlr/DeepPolyElement.cpp index 33ec43cfc9..0652047f50 100644 --- a/src/nlr/DeepPolyElement.cpp +++ b/src/nlr/DeepPolyElement.cpp @@ -1,297 +1,282 @@ -/********************* */ -/*! \file DeepPolyElement.cpp - ** \verbatim - ** Top contributors (to current version): - ** Haoze Andrew Wu - ** This file is part of the Marabou project. - ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS - ** in the top-level source directory) and their institutional affiliations. - ** All rights reserved. 
See the file COPYING in the top-level source - ** directory for licensing information.\endverbatim - ** - ** [[ Add lengthier description here ]] - -**/ - -#include "DeepPolyElement.h" - -namespace NLR { - -DeepPolyElement::DeepPolyElement() - : _layer( NULL ) - , _size( 0 ) - , _layerIndex( 0 ) - , _storeOutputSymbolicBounds( false ) - , _storePredecessorSymbolicBounds( false ) - , _useParameterisedSBT( false ) - , _layerIndicesToParameters( NULL ) - , _outputLayerSize( 0 ) - , _symbolicLb( NULL ) - , _symbolicUb( NULL ) - , _symbolicLowerBias( NULL ) - , _symbolicUpperBias( NULL ) - , _lb( NULL ) - , _ub( NULL ) - , _work1SymbolicLb( NULL ) - , _work1SymbolicUb( NULL ) - , _work2SymbolicLb( NULL ) - , _work2SymbolicUb( NULL ) - , _workSymbolicLowerBias( NULL ) - , _workSymbolicUpperBias( NULL ){}; - -unsigned DeepPolyElement::getSize() const -{ - return _size; -} - -unsigned DeepPolyElement::getLayerIndex() const -{ - return _layerIndex; -} - -Layer::Type DeepPolyElement::getLayerType() const -{ - return _layer->getLayerType(); -} - -bool DeepPolyElement::hasPredecessor() -{ - return !_layer->getSourceLayers().empty(); -} - -const Map &DeepPolyElement::getPredecessorIndices() const -{ - const Map &sourceLayers = _layer->getSourceLayers(); - return sourceLayers; -} - -double *DeepPolyElement::getSymbolicLb() const -{ - return _symbolicLb; -} - -double *DeepPolyElement::getSymbolicUb() const -{ - return _symbolicUb; -} - -double *DeepPolyElement::getSymbolicLowerBias() const -{ - return _symbolicLowerBias; -} - -double *DeepPolyElement::getSymbolicUpperBias() const -{ - return _symbolicUpperBias; -} - -double DeepPolyElement::getLowerBound( unsigned index ) const -{ - ASSERT( index < getSize() ); - return _lb[index]; -} - -double DeepPolyElement::getUpperBound( unsigned index ) const -{ - ASSERT( index < getSize() ); - return _ub[index]; -} - -void DeepPolyElement::setStoreOutputSymbolicBounds( bool storeOutputSymbolicBounds ) -{ - _storeOutputSymbolicBounds = storeOutputSymbolicBounds; -} - -void DeepPolyElement::setStorePredecessorSymbolicBounds( bool storePredecessorSymbolicBounds ) -{ - _storePredecessorSymbolicBounds = storePredecessorSymbolicBounds; -} - -void DeepPolyElement::setUseParameterisedSBT( bool useParameterisedSBT ) -{ - _useParameterisedSBT = useParameterisedSBT; -} - -void DeepPolyElement::setLayerIndicesToParameters( - Map> *layerIndicesToParameters ) -{ - _layerIndicesToParameters = layerIndicesToParameters; -} - -void DeepPolyElement::setOutputLayerSize( unsigned outputLayerSize ) -{ - _outputLayerSize = outputLayerSize; -} - -double DeepPolyElement::getLowerBoundFromLayer( unsigned index ) const -{ - ASSERT( index < getSize() ); - return _layer->getLb( index ); -} - -double DeepPolyElement::getUpperBoundFromLayer( unsigned index ) const -{ - ASSERT( index < getSize() ); - return _layer->getUb( index ); -} - -void DeepPolyElement::getConcreteBounds() -{ - unsigned size = getSize(); - for ( unsigned i = 0; i < size; ++i ) - { - _lb[i] = _layer->getLb( i ); - _ub[i] = _layer->getUb( i ); - } -} - -void DeepPolyElement::allocateMemory() -{ - freeMemoryIfNeeded(); - - unsigned size = getSize(); - _lb = new double[size]; - _ub = new double[size]; - - std::fill_n( _lb, size, FloatUtils::negativeInfinity() ); - std::fill_n( _ub, size, FloatUtils::infinity() ); -} - -void DeepPolyElement::freeMemoryIfNeeded() -{ - if ( _lb ) - { - delete[] _lb; - _lb = NULL; - } - - if ( _ub ) - { - delete[] _ub; - _ub = NULL; - } -} - -void DeepPolyElement::setWorkingMemory( double 
*work1SymbolicLb, - double *work1SymbolicUb, - double *work2SymbolicLb, - double *work2SymbolicUb, - double *workSymbolicLowerBias, - double *workSymbolicUpperBias ) -{ - _work1SymbolicLb = work1SymbolicLb; - _work1SymbolicUb = work1SymbolicUb; - _work2SymbolicLb = work2SymbolicLb; - _work2SymbolicUb = work2SymbolicUb; - _workSymbolicLowerBias = workSymbolicLowerBias; - _workSymbolicUpperBias = workSymbolicUpperBias; -} - -void DeepPolyElement::setSymbolicBoundsMemory( - Map> *outputSymbolicLb, - Map> *outputSymbolicUb, - Map> *outputSymbolicLowerBias, - Map> *outputSymbolicUpperBias, - Map> *predecessorSymbolicLb, - Map> *predecessorSymbolicUb, - Map> *predecessorSymbolicLowerBias, - Map> *predecessorSymbolicUpperBias ) -{ - _outputSymbolicLb = outputSymbolicLb; - _outputSymbolicUb = outputSymbolicUb; - _outputSymbolicLowerBias = outputSymbolicLowerBias; - _outputSymbolicUpperBias = outputSymbolicUpperBias; - _predecessorSymbolicLb = predecessorSymbolicLb; - _predecessorSymbolicUb = predecessorSymbolicUb; - _predecessorSymbolicLowerBias = predecessorSymbolicLowerBias; - _predecessorSymbolicUpperBias = predecessorSymbolicUpperBias; -} - -void DeepPolyElement::storeOutputSymbolicBounds( - double *work1SymbolicLb, - double *work1SymbolicUb, - double *workSymbolicLowerBias, - double *workSymbolicUpperBias, - Map &residualLb, - Map &residualUb, - Set &residualLayerIndices, - const Map &deepPolyElementsBefore ) -{ - for ( unsigned i = 0; i < _size; ++i ) - { - if ( _layer->neuronEliminated( i ) ) - { - double value = _layer->getEliminatedNeuronValue( i ); - for ( unsigned j = 0; j < _outputLayerSize; ++j ) - { - workSymbolicLowerBias[i] += work1SymbolicLb[i * _size + j] * value; - workSymbolicUpperBias[i] += work1SymbolicUb[i * _size + j] * value; - work1SymbolicLb[i * _size + j] = 0; - work1SymbolicUb[i * _size + j] = 0; - } - } - } - - Vector symbolicLowerBiasConcretizedResiduals( _outputLayerSize, 0 ); - Vector symbolicUpperBiasConcretizedResiduals( _outputLayerSize, 0 ); - for ( unsigned i = 0; i < _outputLayerSize; ++i ) - { - symbolicLowerBiasConcretizedResiduals[i] = workSymbolicLowerBias[i]; - symbolicUpperBiasConcretizedResiduals[i] = workSymbolicUpperBias[i]; - } - - for ( const auto &residualLayerIndex : residualLayerIndices ) - { - DeepPolyElement *residualElement = deepPolyElementsBefore[residualLayerIndex]; - double *currentResidualLb = residualLb[residualLayerIndex]; - double *currentResidualUb = residualUb[residualLayerIndex]; - - // Get concrete bounds - for ( unsigned i = 0; i < residualElement->getSize(); ++i ) - { - double sourceLb = residualElement->getLowerBoundFromLayer( i ) - - GlobalConfiguration::SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT; - double sourceUb = residualElement->getUpperBoundFromLayer( i ) + - GlobalConfiguration::SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT; - - for ( unsigned j = 0; j < _outputLayerSize; ++j ) - { - // Compute lower bound - double weight = currentResidualLb[i * _size + j]; - if ( weight >= 0 ) - { - symbolicLowerBiasConcretizedResiduals[j] += ( weight * sourceLb ); - } - else - { - symbolicLowerBiasConcretizedResiduals[j] += ( weight * sourceUb ); - } - - // Compute upper bound - weight = currentResidualUb[i * _size + j]; - if ( weight >= 0 ) - { - symbolicUpperBiasConcretizedResiduals[j] += ( weight * sourceUb ); - } - else - { - symbolicUpperBiasConcretizedResiduals[j] += ( weight * sourceLb ); - } - } - } - } - - for ( unsigned i = 0; i < _size * _outputLayerSize; ++i ) - { - ( *_outputSymbolicLb )[_layerIndex][i] = work1SymbolicLb[i]; - ( 
*_outputSymbolicUb )[_layerIndex][i] = work1SymbolicUb[i]; - } - - for ( unsigned i = 0; i < _outputLayerSize; ++i ) - { - ( *_outputSymbolicLowerBias )[_layerIndex][i] = symbolicLowerBiasConcretizedResiduals[i]; - ( *_outputSymbolicUpperBias )[_layerIndex][i] = symbolicUpperBiasConcretizedResiduals[i]; - } -} - -} // namespace NLR +/********************* */ +/*! \file DeepPolyElement.cpp + ** \verbatim + ** Top contributors (to current version): + ** Haoze Andrew Wu + ** This file is part of the Marabou project. + ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS + ** in the top-level source directory) and their institutional affiliations. + ** All rights reserved. See the file COPYING in the top-level source + ** directory for licensing information.\endverbatim + ** + ** [[ Add lengthier description here ]] + +**/ + +#include "DeepPolyElement.h" + +namespace NLR { + +DeepPolyElement::DeepPolyElement() + : _layer( NULL ) + , _size( 0 ) + , _layerIndex( 0 ) + , _storeOutputSymbolicBounds( false ) + , _storePredecessorSymbolicBounds( false ) + , _useParameterisedSBT( false ) + , _layerIndicesToParameters( NULL ) + , _outputLayerSize( 0 ) + , _symbolicLb( NULL ) + , _symbolicUb( NULL ) + , _symbolicLowerBias( NULL ) + , _symbolicUpperBias( NULL ) + , _lb( NULL ) + , _ub( NULL ) + , _work1SymbolicLb( NULL ) + , _work1SymbolicUb( NULL ) + , _work2SymbolicLb( NULL ) + , _work2SymbolicUb( NULL ) + , _workSymbolicLowerBias( NULL ) + , _workSymbolicUpperBias( NULL ){}; + +unsigned DeepPolyElement::getSize() const +{ + return _size; +} +unsigned DeepPolyElement::getLayerIndex() const +{ + return _layerIndex; +} + +Layer::Type DeepPolyElement::getLayerType() const +{ + return _layer->getLayerType(); +} + +bool DeepPolyElement::hasPredecessor() +{ + return !_layer->getSourceLayers().empty(); +} + +const Map &DeepPolyElement::getPredecessorIndices() const +{ + const Map &sourceLayers = _layer->getSourceLayers(); + return sourceLayers; +} + +double *DeepPolyElement::getSymbolicLb() const +{ + return _symbolicLb; +} + +double *DeepPolyElement::getSymbolicUb() const +{ + return _symbolicUb; +} + +double *DeepPolyElement::getSymbolicLowerBias() const +{ + return _symbolicLowerBias; +} + +double *DeepPolyElement::getSymbolicUpperBias() const +{ + return _symbolicUpperBias; +} + +double DeepPolyElement::getLowerBound( unsigned index ) const +{ + ASSERT( index < getSize() ); + return _lb[index]; +} + +double DeepPolyElement::getUpperBound( unsigned index ) const +{ + ASSERT( index < getSize() ); + return _ub[index]; +} + +void DeepPolyElement::setStoreOutputSymbolicBounds( bool storeOutputSymbolicBounds ) +{ + _storeOutputSymbolicBounds = storeOutputSymbolicBounds; +} + +void DeepPolyElement::setStorePredecessorSymbolicBounds( bool storePredecessorSymbolicBounds ) +{ + _storePredecessorSymbolicBounds = storePredecessorSymbolicBounds; +} + +void DeepPolyElement::setUseParameterisedSBT( bool useParameterisedSBT ) +{ + _useParameterisedSBT = useParameterisedSBT; +} + +void DeepPolyElement::setLayerIndicesToParameters( + Map> *layerIndicesToParameters ) +{ + _layerIndicesToParameters = layerIndicesToParameters; +} + +void DeepPolyElement::setOutputLayerSize( unsigned outputLayerSize ) +{ + _outputLayerSize = outputLayerSize; +} + +double DeepPolyElement::getLowerBoundFromLayer( unsigned index ) const +{ + ASSERT( index < getSize() ); + return _layer->getLb( index ); +} + +double DeepPolyElement::getUpperBoundFromLayer( unsigned index ) const +{ + ASSERT( index < getSize() ); + return 
_layer->getUb( index ); +} + +void DeepPolyElement::getConcreteBounds() +{ + unsigned size = getSize(); + for ( unsigned i = 0; i < size; ++i ) + { + _lb[i] = _layer->getLb( i ); + _ub[i] = _layer->getUb( i ); + } +} + +void DeepPolyElement::allocateMemory() +{ + freeMemoryIfNeeded(); + + unsigned size = getSize(); + _lb = new double[size]; + _ub = new double[size]; + + std::fill_n( _lb, size, FloatUtils::negativeInfinity() ); + std::fill_n( _ub, size, FloatUtils::infinity() ); +} + +void DeepPolyElement::freeMemoryIfNeeded() +{ + if ( _lb ) + { + delete[] _lb; + _lb = NULL; + } + + if ( _ub ) + { + delete[] _ub; + _ub = NULL; + } +} + +void DeepPolyElement::setWorkingMemory( double *work1SymbolicLb, + double *work1SymbolicUb, + double *work2SymbolicLb, + double *work2SymbolicUb, + double *workSymbolicLowerBias, + double *workSymbolicUpperBias ) +{ + _work1SymbolicLb = work1SymbolicLb; + _work1SymbolicUb = work1SymbolicUb; + _work2SymbolicLb = work2SymbolicLb; + _work2SymbolicUb = work2SymbolicUb; + _workSymbolicLowerBias = workSymbolicLowerBias; + _workSymbolicUpperBias = workSymbolicUpperBias; +} + +void DeepPolyElement::setSymbolicBoundsMemory( + Map> *outputSymbolicLb, + Map> *outputSymbolicUb, + Map> *outputSymbolicLowerBias, + Map> *outputSymbolicUpperBias, + Map> *predecessorSymbolicLb, + Map> *predecessorSymbolicUb, + Map> *predecessorSymbolicLowerBias, + Map> *predecessorSymbolicUpperBias ) +{ + _outputSymbolicLb = outputSymbolicLb; + _outputSymbolicUb = outputSymbolicUb; + _outputSymbolicLowerBias = outputSymbolicLowerBias; + _outputSymbolicUpperBias = outputSymbolicUpperBias; + _predecessorSymbolicLb = predecessorSymbolicLb; + _predecessorSymbolicUb = predecessorSymbolicUb; + _predecessorSymbolicLowerBias = predecessorSymbolicLowerBias; + _predecessorSymbolicUpperBias = predecessorSymbolicUpperBias; +} + +void DeepPolyElement::storeOutputSymbolicBounds( + double *work1SymbolicLb, + double *work1SymbolicUb, + double *workSymbolicLowerBias, + double *workSymbolicUpperBias, + Map &residualLb, + Map &residualUb, + Set &residualLayerIndices, + const Map &deepPolyElementsBefore ) +{ + // Remove externally fixed neurons from symbolic bounds, replace them with their value. + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _layer->neuronEliminated( i ) ) + { + double value = _layer->getEliminatedNeuronValue( i ); + for ( unsigned j = 0; j < _outputLayerSize; ++j ) + { + workSymbolicLowerBias[i] += work1SymbolicLb[i * _size + j] * value; + workSymbolicUpperBias[i] += work1SymbolicUb[i * _size + j] * value; + work1SymbolicLb[i * _size + j] = 0; + work1SymbolicUb[i * _size + j] = 0; + } + } + } + + // Remove residual layers from symbolic bounds, concretize them instead. + Vector symbolicLowerBiasConcretizedResiduals( _outputLayerSize, 0 ); + Vector symbolicUpperBiasConcretizedResiduals( _outputLayerSize, 0 ); + for ( unsigned i = 0; i < _outputLayerSize; ++i ) + { + symbolicLowerBiasConcretizedResiduals[i] = workSymbolicLowerBias[i]; + symbolicUpperBiasConcretizedResiduals[i] = workSymbolicUpperBias[i]; + } + for ( const auto &residualLayerIndex : residualLayerIndices ) + { + DeepPolyElement *residualElement = deepPolyElementsBefore[residualLayerIndex]; + double *currentResidualLb = residualLb[residualLayerIndex]; + double *currentResidualUb = residualUb[residualLayerIndex]; + + // Get concrete bounds for residual neurons. 
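+ // Soundness of the concretization below: dropping a residual term w * x,
+ // where x ranges over [l, u], from a symbolic lower bound requires adding
+ // w * l when w >= 0 and w * u when w < 0; the upper bound is symmetric.
+ // For example, dropping -2 * x with x in [1, 3] adds -2 * 3 = -6 to the
+ // lower bias and -2 * 1 = -2 to the upper bias.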
+ for ( unsigned i = 0; i < residualElement->getSize(); ++i ) + { + double sourceLb = residualElement->getLowerBoundFromLayer( i ) - + GlobalConfiguration::SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT; + double sourceUb = residualElement->getUpperBoundFromLayer( i ) + + GlobalConfiguration::SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT; + + for ( unsigned j = 0; j < _outputLayerSize; ++j ) + { + double lowerWeight = currentResidualLb[i * _outputLayerSize + j]; + double upperWeight = currentResidualUb[i * _outputLayerSize + j]; + symbolicLowerBiasConcretizedResiduals[j] += + lowerWeight >= 0 ? lowerWeight * sourceLb : lowerWeight * sourceUb; + symbolicUpperBiasConcretizedResiduals[j] += + upperWeight >= 0 ? upperWeight * sourceUb : upperWeight * sourceLb; + } + } + } + + // Store updated bounds. + for ( unsigned i = 0; i < _size * _outputLayerSize; ++i ) + { + ( *_outputSymbolicLb )[_layerIndex][i] = work1SymbolicLb[i]; + ( *_outputSymbolicUb )[_layerIndex][i] = work1SymbolicUb[i]; + } + for ( unsigned i = 0; i < _outputLayerSize; ++i ) + { + ( *_outputSymbolicLowerBias )[_layerIndex][i] = symbolicLowerBiasConcretizedResiduals[i]; + ( *_outputSymbolicUpperBias )[_layerIndex][i] = symbolicUpperBiasConcretizedResiduals[i]; + } +} + +} // namespace NLR diff --git a/src/nlr/DeepPolyLeakyReLUElement.cpp b/src/nlr/DeepPolyLeakyReLUElement.cpp index 358d02770d..ee63944f43 100644 --- a/src/nlr/DeepPolyLeakyReLUElement.cpp +++ b/src/nlr/DeepPolyLeakyReLUElement.cpp @@ -221,7 +221,6 @@ void DeepPolyLeakyReLUElement::execute( log( Stringf( "Neuron%u LB: %f, UB: %f", i, _lb[i], _ub[i] ) ); } - if ( _storePredecessorSymbolicBounds ) { storePredecessorSymbolicBounds(); @@ -234,9 +233,8 @@ void DeepPolyLeakyReLUElement::storePredecessorSymbolicBounds() { for ( unsigned i = 0; i < _size; ++i ) { - NeuronIndex sourceIndex = *( _layer->getActivationSources( i ).begin() ); - ( *_predecessorSymbolicLb )[_layerIndex][_size * sourceIndex._neuron + i] = _symbolicLb[i]; - ( *_predecessorSymbolicUb )[_layerIndex][_size * sourceIndex._neuron + i] = _symbolicUb[i]; + ( *_predecessorSymbolicLb )[_layerIndex][i] = _symbolicLb[i]; + ( *_predecessorSymbolicUb )[_layerIndex][i] = _symbolicUb[i]; ( *_predecessorSymbolicLowerBias )[_layerIndex][i] = _symbolicLowerBias[i]; ( *_predecessorSymbolicUpperBias )[_layerIndex][i] = _symbolicUpperBias[i]; } diff --git a/src/nlr/DeepPolyMaxPoolElement.cpp b/src/nlr/DeepPolyMaxPoolElement.cpp index 4957e60ab3..398f5493ee 100644 --- a/src/nlr/DeepPolyMaxPoolElement.cpp +++ b/src/nlr/DeepPolyMaxPoolElement.cpp @@ -131,7 +131,7 @@ void DeepPolyMaxPoolElement::storePredecessorSymbolicBounds( { for ( unsigned i = 0; i < _size; ++i ) { - if ( _phaseFixed[i] > 0 ) + if ( _phaseFixed[i] ) { ( *_predecessorSymbolicLb )[_layerIndex][_size * indexOfMaxLowerBound[i] + i] = 1; ( *_predecessorSymbolicUb )[_layerIndex][_size * indexOfMaxLowerBound[i] + i] = 1; diff --git a/src/nlr/DeepPolyReLUElement.cpp b/src/nlr/DeepPolyReLUElement.cpp index 51accad00f..bbb3b4c161 100644 --- a/src/nlr/DeepPolyReLUElement.cpp +++ b/src/nlr/DeepPolyReLUElement.cpp @@ -180,9 +180,8 @@ void DeepPolyReLUElement::storePredecessorSymbolicBounds() { for ( unsigned i = 0; i < _size; ++i ) { - NeuronIndex sourceIndex = *( _layer->getActivationSources( i ).begin() ); - ( *_predecessorSymbolicLb )[_layerIndex][_size * sourceIndex._neuron + i] = _symbolicLb[i]; - ( *_predecessorSymbolicUb )[_layerIndex][_size * sourceIndex._neuron + i] = _symbolicUb[i]; + ( *_predecessorSymbolicLb )[_layerIndex][i] = _symbolicLb[i]; + ( 
*_predecessorSymbolicUb )[_layerIndex][i] = _symbolicUb[i]; ( *_predecessorSymbolicLowerBias )[_layerIndex][i] = _symbolicLowerBias[i]; ( *_predecessorSymbolicUpperBias )[_layerIndex][i] = _symbolicUpperBias[i]; } diff --git a/src/nlr/DeepPolyRoundElement.cpp b/src/nlr/DeepPolyRoundElement.cpp index 15561fbeff..fbfb09afa7 100644 --- a/src/nlr/DeepPolyRoundElement.cpp +++ b/src/nlr/DeepPolyRoundElement.cpp @@ -95,9 +95,8 @@ void DeepPolyRoundElement::storePredecessorSymbolicBounds() { for ( unsigned i = 0; i < _size; ++i ) { - NeuronIndex sourceIndex = *( _layer->getActivationSources( i ).begin() ); - ( *_predecessorSymbolicLb )[_layerIndex][_size * sourceIndex._neuron + i] = _symbolicLb[i]; - ( *_predecessorSymbolicUb )[_layerIndex][_size * sourceIndex._neuron + i] = _symbolicUb[i]; + ( *_predecessorSymbolicLb )[_layerIndex][i] = _symbolicLb[i]; + ( *_predecessorSymbolicUb )[_layerIndex][i] = _symbolicUb[i]; ( *_predecessorSymbolicLowerBias )[_layerIndex][i] = _symbolicLowerBias[i]; ( *_predecessorSymbolicUpperBias )[_layerIndex][i] = _symbolicUpperBias[i]; } diff --git a/src/nlr/DeepPolySigmoidElement.cpp b/src/nlr/DeepPolySigmoidElement.cpp index 0f8fb71afc..1f290b0210 100644 --- a/src/nlr/DeepPolySigmoidElement.cpp +++ b/src/nlr/DeepPolySigmoidElement.cpp @@ -112,9 +112,8 @@ void DeepPolySigmoidElement::storePredecessorSymbolicBounds() { for ( unsigned i = 0; i < _size; ++i ) { - NeuronIndex sourceIndex = *( _layer->getActivationSources( i ).begin() ); - ( *_predecessorSymbolicLb )[_layerIndex][_size * sourceIndex._neuron + i] = _symbolicLb[i]; - ( *_predecessorSymbolicUb )[_layerIndex][_size * sourceIndex._neuron + i] = _symbolicUb[i]; + ( *_predecessorSymbolicLb )[_layerIndex][i] = _symbolicLb[i]; + ( *_predecessorSymbolicUb )[_layerIndex][i] = _symbolicUb[i]; ( *_predecessorSymbolicLowerBias )[_layerIndex][i] = _symbolicLowerBias[i]; ( *_predecessorSymbolicUpperBias )[_layerIndex][i] = _symbolicUpperBias[i]; } diff --git a/src/nlr/DeepPolySignElement.cpp b/src/nlr/DeepPolySignElement.cpp index c915261723..432f04e1c4 100644 --- a/src/nlr/DeepPolySignElement.cpp +++ b/src/nlr/DeepPolySignElement.cpp @@ -165,9 +165,8 @@ void DeepPolySignElement::storePredecessorSymbolicBounds() { for ( unsigned i = 0; i < _size; ++i ) { - NeuronIndex sourceIndex = *( _layer->getActivationSources( i ).begin() ); - ( *_predecessorSymbolicLb )[_layerIndex][_size * sourceIndex._neuron + i] = _symbolicLb[i]; - ( *_predecessorSymbolicUb )[_layerIndex][_size * sourceIndex._neuron + i] = _symbolicUb[i]; + ( *_predecessorSymbolicLb )[_layerIndex][i] = _symbolicLb[i]; + ( *_predecessorSymbolicUb )[_layerIndex][i] = _symbolicUb[i]; ( *_predecessorSymbolicLowerBias )[_layerIndex][i] = _symbolicLowerBias[i]; ( *_predecessorSymbolicUpperBias )[_layerIndex][i] = _symbolicUpperBias[i]; } diff --git a/src/nlr/DeepPolySoftmaxElement.cpp b/src/nlr/DeepPolySoftmaxElement.cpp index 3490b1fa43..d6d8713fd5 100644 --- a/src/nlr/DeepPolySoftmaxElement.cpp +++ b/src/nlr/DeepPolySoftmaxElement.cpp @@ -199,10 +199,18 @@ void DeepPolySoftmaxElement::execute( void DeepPolySoftmaxElement::storePredecessorSymbolicBounds() { - for ( unsigned i = 0; i < _size * _size; ++i ) + for ( unsigned i = 0; i < _size; ++i ) { - ( *_predecessorSymbolicLb )[_layerIndex][i] = _symbolicLb[i]; - ( *_predecessorSymbolicUb )[_layerIndex][i] = _symbolicUb[i]; + List sources = _layer->getActivationSources( i ); + unsigned inputIndex = 0; + for ( const auto &sourceIndex : sources ) + { + ( *_predecessorSymbolicLb )[_layerIndex][_size * inputIndex + i] = + 
_symbolicLb[_size * sourceIndex._neuron + i]; + ( *_predecessorSymbolicUb )[_layerIndex][_size * inputIndex + i] = + _symbolicUb[_size * sourceIndex._neuron + i]; + ++inputIndex; + } } for ( unsigned i = 0; i < _size; ++i ) @@ -304,7 +312,6 @@ void DeepPolySoftmaxElement::symbolicBoundInTermsOfPredecessor( predecessor->getLayerIndex() ) ); } - void DeepPolySoftmaxElement::allocateMemory( unsigned maxLayerSize ) { freeMemoryIfNeeded(); diff --git a/src/nlr/DeepPolyWeightedSumElement.cpp b/src/nlr/DeepPolyWeightedSumElement.cpp index 62b0e2209b..19c0f99fcd 100644 --- a/src/nlr/DeepPolyWeightedSumElement.cpp +++ b/src/nlr/DeepPolyWeightedSumElement.cpp @@ -397,7 +397,6 @@ void DeepPolyWeightedSumElement::concretizeSymbolicBoundForSourceLayer( } } - void DeepPolyWeightedSumElement::symbolicBoundInTermsOfPredecessor( const double *symbolicLb, const double *symbolicUb, diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp index 67f2725155..1649b21a24 100644 --- a/src/nlr/LPFormulator.cpp +++ b/src/nlr/LPFormulator.cpp @@ -2,7 +2,7 @@ /*! \file LPFormulator.cpp ** \verbatim ** Top contributors (to current version): - ** Guy Katz + ** Guy Katz, Ido Shmuel ** This file is part of the Marabou project. ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS ** in the top-level source directory) and their institutional affiliations. @@ -1217,7 +1217,6 @@ void LPFormulator::addSignLayerToLpRelaxation( GurobiWrapper &gurobi, } } - void LPFormulator::addMaxLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -1485,20 +1484,21 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, List sources = layer->getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - Vector sourceLbs; Vector sourceUbs; Vector sourceValues; Vector sourceNeurons; + Vector sourceLayers; bool allConstant = true; for ( const auto &sourceIndex : sources ) { + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); String sourceName = Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeuron ) ); + sourceLayers.append( sourceLayer ); sourceNeurons.append( sourceNeuron ); sourceLbs.append( sourceLb ); sourceUbs.append( sourceUb ); @@ -1547,10 +1547,10 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( -sourceLbs[1], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); + Stringf( "x%u", sourceLayers[0]->neuronToVariable( sourceNeurons[0] ) ) ) ); terms.append( GurobiWrapper::Term( -sourceLbs[0], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); + Stringf( "x%u", sourceLayers[1]->neuronToVariable( sourceNeurons[1] ) ) ) ); gurobi.addGeqConstraint( terms, -sourceLbs[0] * sourceLbs[1] ); // Upper bound: out <= u_y * x + l_x * y - l_x * u_y @@ -1558,10 +1558,10 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( -sourceUbs[1], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); + Stringf( "x%u", sourceLayers[0]->neuronToVariable( sourceNeurons[0] ) ) ) ); terms.append( GurobiWrapper::Term( -sourceLbs[0], - 
Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); + Stringf( "x%u", sourceLayers[1]->neuronToVariable( sourceNeurons[1] ) ) ) ); gurobi.addLeqConstraint( terms, -sourceLbs[0] * sourceUbs[1] ); } } @@ -2050,20 +2050,21 @@ void LPFormulator::addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &g List sources = layer->getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - Vector sourceLbs; Vector sourceUbs; Vector sourceValues; Vector sourceNeurons; + Vector sourceLayers; bool allConstant = true; for ( const auto &sourceIndex : sources ) { + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); String sourceName = Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeuron ) ); + sourceLayers.append( sourceLayer ); sourceNeurons.append( sourceNeuron ); sourceLbs.append( sourceLb ); sourceUbs.append( sourceUb ); @@ -2115,17 +2116,17 @@ void LPFormulator::addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &g // Upper bound: out <= a_u * x + b_u * y + c_u, where // a_u = alpha2 * u_y + ( 1 - alpha2 ) * l_y // b_u = alpha2 * l_x + ( 1 - alpha2 ) * u_x - // c_u = -alpha2 * -l_x * u_y - ( 1 - alpha2 ) * u_x * l_y + // c_u = -alpha2 * l_x * u_y - ( 1 - alpha2 ) * u_x * l_y List terms; terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( -coeffs[0] * sourceLbs[1] - ( 1 - coeffs[0] ) * sourceUbs[1], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); + Stringf( "x%u", sourceLayers[0]->neuronToVariable( sourceNeurons[0] ) ) ) ); terms.append( GurobiWrapper::Term( -coeffs[0] * sourceLbs[0] - ( 1 - coeffs[0] ) * sourceUbs[0], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); + Stringf( "x%u", sourceLayers[1]->neuronToVariable( sourceNeurons[1] ) ) ) ); gurobi.addGeqConstraint( terms, -coeffs[0] * sourceLbs[0] * sourceLbs[1] - ( 1 - coeffs[0] ) * sourceUbs[0] * sourceUbs[1] ); @@ -2134,10 +2135,10 @@ void LPFormulator::addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &g terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( -coeffs[1] * sourceUbs[1] - ( 1 - coeffs[1] ) * sourceLbs[1], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); + Stringf( "x%u", sourceLayers[0]->neuronToVariable( sourceNeurons[0] ) ) ) ); terms.append( GurobiWrapper::Term( -coeffs[1] * sourceLbs[0] - ( 1 - coeffs[1] ) * sourceUbs[0], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); + Stringf( "x%u", sourceLayers[1]->neuronToVariable( sourceNeurons[1] ) ) ) ); gurobi.addLeqConstraint( terms, -coeffs[1] * sourceLbs[0] * sourceUbs[1] - ( 1 - coeffs[1] ) * sourceUbs[0] * sourceLbs[1] ); @@ -2162,8 +2163,8 @@ void LPFormulator::addPolyognalTighteningsToLpRelaxation( bool outOfBounds = false; for ( const auto &pair : neuronToCoefficient ) { - unsigned currentLayerIndex = pair.first._layer; - if ( currentLayerIndex < firstLayer || currentLayerIndex > lastLayer ) + unsigned layerIndex = pair.first._layer; + if ( layerIndex < firstLayer || layerIndex > lastLayer ) { outOfBounds = true; } @@ -2176,24 +2177,24 @@ void LPFormulator::addPolyognalTighteningsToLpRelaxation( terms.clear(); for ( const auto &pair : neuronToCoefficient ) { - unsigned 
currentLayerIndex = pair.first._layer; - unsigned i = pair.first._neuron; + unsigned layerIndex = pair.first._layer; + unsigned neuron = pair.first._neuron; double coeff = pair.second; - Layer *layer = layers[currentLayerIndex]; + Layer *layer = layers[layerIndex]; - if ( !layer->neuronEliminated( i ) ) + if ( !layer->neuronEliminated( neuron ) ) { - unsigned variable = layer->neuronToVariable( i ); - String variableName = Stringf( "x%u", variable ); + const String variableName = Stringf( "x%u", layer->neuronToVariable( neuron ) ); if ( !gurobi.containsVariable( variableName ) ) { - gurobi.addVariable( variableName, layer->getLb( i ), layer->getUb( i ) ); + gurobi.addVariable( + variableName, layer->getLb( neuron ), layer->getUb( neuron ) ); } - terms.append( GurobiWrapper::Term( coeff, Stringf( "x%u", variable ) ) ); + terms.append( GurobiWrapper::Term( coeff, variableName ) ); } else { - value -= coeff * layer->getEliminatedNeuronValue( i ); + value -= coeff * layer->getEliminatedNeuronValue( neuron ); } } diff --git a/src/nlr/LPFormulator.h b/src/nlr/LPFormulator.h index 6b4ef5c7b3..7d0737b635 100644 --- a/src/nlr/LPFormulator.h +++ b/src/nlr/LPFormulator.h @@ -2,7 +2,7 @@ /*! \file LPFormulator.h ** \verbatim ** Top contributors (to current version): - ** Guy Katz + ** Guy Katz, Ido Shmuel ** This file is part of the Marabou project. ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS ** in the top-level source directory) and their institutional affiliations. @@ -152,7 +152,6 @@ class LPFormulator : public ParallelSolver const Vector &polygonalTightenings = Vector( {} ) ); - // Create LP relaxations depending on external parameters. void addLayerToParameterisedModel( GurobiWrapper &gurobi, const Layer *layer, diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index 43f24e341b..cb43168c0c 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -2,7 +2,7 @@ /*! \file Layer.cpp ** \verbatim ** Top contributors (to current version): - ** Guy Katz, Ido Shmuel + ** Guy Katz ** This file is part of the Marabou project. ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS ** in the top-level source directory) and their institutional affiliations. 
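The sigmoid and round hunks below lean on monotonicity: an increasing activation maps a source interval [l, u] directly to [f(l), f(u)], so interval arithmetic only needs the two endpoint evaluations. A minimal standalone sketch of that rule (a plain std::exp-based sigmoid stands in for SigmoidConstraint::sigmoid):

    #include <cmath>
    #include <utility>

    // 1 / ( 1 + e^-x ), strictly increasing on all of R.
    static double sigmoid( double x )
    {
        return 1.0 / ( 1.0 + std::exp( -x ) );
    }

    // Propagate the source interval [l, u] through an increasing activation;
    // the image is exactly [sigmoid( l ), sigmoid( u )].
    static std::pair<double, double> sigmoidInterval( double l, double u )
    {
        return { sigmoid( l ), sigmoid( u ) }; // e.g. [0, 1] -> [0.5, ~0.731]
    }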
@@ -1055,7 +1055,6 @@ void Layer::computeIntervalArithmeticBoundsForSigmoid() double lbSigmoid = SigmoidConstraint::sigmoid( lb ); double ubSigmoid = SigmoidConstraint::sigmoid( ub ); - if ( _lb[i] < lbSigmoid ) { _lb[i] = lbSigmoid; @@ -1087,7 +1086,6 @@ void Layer::computeIntervalArithmeticBoundsForRound() double lbRound = FloatUtils::round( lb ); double ubRound = FloatUtils::round( ub ); - if ( _lb[i] < lbRound ) { _lb[i] = lbRound; @@ -1261,14 +1259,13 @@ void Layer::computeIntervalArithmeticBoundsForBilinear() List sources = getActivationSources( i ); ASSERT( sources.size() == 2 ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - Vector sourceLbs; Vector sourceUbs; Vector sourceValues; bool allConstant = true; for ( const auto &sourceIndex : sources ) { + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); @@ -1322,7 +1319,6 @@ void Layer::computeIntervalArithmeticBoundsForBilinear() ub = v; } - if ( _lb[i] < lb ) { _lb[i] = lb; @@ -2067,7 +2063,6 @@ void Layer::computeSymbolicBoundsForLeakyRelu() _symbolicUpperBias[i] *= weight; _symbolicUpperBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; - // For the lower bound, in general, x_f >= lambda * x_b, where // 0 <= lambda <= 1, would be a sound lower bound. We // use the heuristic described in section 4.1 of @@ -2347,7 +2342,6 @@ void Layer::computeSymbolicBoundsForSigmoid() _symbolicUpperBias[i] += sourceUbSigmoid - lambdaPrime * sourceUb; } - /* We now have the symbolic representation for the current layer. Next, we compute new lower and upper bounds for @@ -2472,7 +2466,6 @@ void Layer::computeSymbolicBoundsForRound() _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); - // Bounds of lb, ub are the rounded values of source lb, ub double sourceUbRound = FloatUtils::round( sourceUb ); double sourceLbRound = FloatUtils::round( sourceLb ); @@ -2482,7 +2475,6 @@ void Layer::computeSymbolicBoundsForRound() _symbolicLbOfLb[i] = sourceLbRound; _symbolicUbOfLb[i] = sourceLbRound; - // Case when the Round constraint is fixed if ( FloatUtils::areEqual( FloatUtils::round( sourceUb ), FloatUtils::round( sourceLb ) ) ) { @@ -2616,7 +2608,6 @@ void Layer::computeSymbolicBoundsForMax() _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[indexOfMaxLowerBound._neuron]; - _symbolicLbOfLb[i] = maxLowerBound; _symbolicUbOfLb[i] = maxLowerBound; _symbolicLbOfUb[i] = sourceUbs[indexOfMaxLowerBound]; @@ -2712,7 +2703,6 @@ void Layer::computeSymbolicBoundsForSoftmax() Vector sourceMids; Vector targetLbs; Vector targetUbs; - unsigned len = 0; for ( const auto &sourceIndex : sources ) { unsigned sourceNeuron = sourceIndex._neuron; @@ -2724,8 +2714,6 @@ void Layer::computeSymbolicBoundsForSoftmax() sourceMids.append( ( sourceLb + sourceUb ) / 2 ); targetLbs.append( _lb[i] ); targetUbs.append( _ub[i] ); - - ++len; } // Find the index of i in the softmax @@ -2764,8 +2752,8 @@ void Layer::computeSymbolicBoundsForSoftmax() _symbolicUpperBias[i] = _ub[i]; for ( const auto &sourceIndex : sources ) { - symbolicLb[len * sourceIndex._neuron + i] = 0; - symbolicUb[len * sourceIndex._neuron + i] = 0; + symbolicLb[_size * sourceIndex._neuron + i] = 0; + symbolicUb[_size * sourceIndex._neuron + i] = 0; } } else @@ -2788,7 +2776,7 @@ void 
Layer::computeSymbolicBoundsForSoftmax() { double dldj = dLSELowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - symbolicLb[len * sourceIndex._neuron + i] = dldj; + symbolicLb[_size * sourceIndex._neuron + i] = dldj; _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; ++inputIndex; } @@ -2801,7 +2789,7 @@ void Layer::computeSymbolicBoundsForSoftmax() { double dldj = dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - symbolicLb[len * sourceIndex._neuron + i] = dldj; + symbolicLb[_size * sourceIndex._neuron + i] = dldj; _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; ++inputIndex; } @@ -2813,7 +2801,7 @@ void Layer::computeSymbolicBoundsForSoftmax() { double dudj = dLSEUpperbound( sourceMids, targetLbs, targetUbs, index, inputIndex ); - symbolicUb[len * sourceIndex._neuron + i] = dudj; + symbolicUb[_size * sourceIndex._neuron + i] = dudj; _symbolicUpperBias[i] -= dudj * sourceMids[inputIndex]; ++inputIndex; } @@ -2826,7 +2814,7 @@ void Layer::computeSymbolicBoundsForSoftmax() { double dldj = dERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - symbolicLb[len * sourceIndex._neuron + i] = dldj; + symbolicLb[_size * sourceIndex._neuron + i] = dldj; _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; ++inputIndex; } @@ -2837,7 +2825,7 @@ void Layer::computeSymbolicBoundsForSoftmax() { double dudj = dERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex ); - symbolicUb[len * sourceIndex._neuron + i] = dudj; + symbolicUb[_size * sourceIndex._neuron + i] = dudj; _symbolicUpperBias[i] -= dudj * sourceMids[inputIndex]; ++inputIndex; } @@ -3039,27 +3027,26 @@ void Layer::computeSymbolicBoundsForBilinear() List sources = getActivationSources( i ); ASSERT( sources.size() == 2 ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - - unsigned sourceLayerSize = sourceLayer->getSize(); - const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); - const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); - Vector sourceLbs; Vector sourceUbs; Vector sourceValues; + Vector sourceNeurons; + Vector sourceLayerSizes; + Vector sourceLayers; bool allConstant = true; - unsigned indexA = 0; - unsigned indexB = 0; - unsigned counter = 0; for ( const auto &sourceIndex : sources ) { + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); + unsigned sourceLayerSize = sourceLayer->getSize(); + sourceLayers.append( sourceLayer ); + sourceNeurons.append( sourceNeuron ); sourceLbs.append( sourceLb ); sourceUbs.append( sourceUb ); + sourceLayerSizes.append( sourceLayerSize ); if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) { @@ -3070,16 +3057,6 @@ void Layer::computeSymbolicBoundsForBilinear() double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); sourceValues.append( sourceValue ); } - - if ( counter == 0 ) - { - indexA = sourceIndex._neuron; - } - else - { - indexB = sourceIndex._neuron; - } - ++counter; } if ( allConstant ) @@ -3109,47 +3086,75 @@ void Layer::computeSymbolicBoundsForBilinear() // Symbolic upper bound: // out <= alpha * x + beta * y + gamma // where alpha = ub_y, beta = lb_x, gamma = -lb_x * ub_y + double aLower = sourceLbs[1]; + double aUpper = sourceUbs[1]; + double bLower = sourceLbs[0]; + double bUpper = sourceLbs[0]; + _symbolicLowerBias[i] = -sourceLbs[0] * sourceLbs[1]; + 
_symbolicUpperBias[i] = -sourceLbs[0] * sourceUbs[1]; + for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - if ( sourceLbs[1] >= 0 ) + if ( aLower >= 0 ) { _symbolicLb[j * _size + i] += - sourceLbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA]; + aLower * ( sourceLayers[0] + ->getSymbolicLb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicLowerBias[i] += aLower * ( sourceLayers[0]->getSymbolicLowerBias() )[0]; } else { _symbolicLb[j * _size + i] += - sourceLbs[1] * sourceSymbolicUb[j * sourceLayerSize + indexA]; + aLower * ( sourceLayers[0] + ->getSymbolicUb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicLowerBias[i] += aLower * ( sourceLayers[0]->getSymbolicUpperBias() )[0]; } - if ( sourceUbs[1] >= 0 ) + if ( aUpper >= 0 ) { _symbolicUb[j * _size + i] += - sourceUbs[1] * sourceSymbolicUb[j * sourceLayerSize + indexA]; + aUpper * ( sourceLayers[0] + ->getSymbolicUb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicUpperBias[i] += aUpper * ( sourceLayers[0]->getSymbolicUpperBias() )[0]; } else { _symbolicUb[j * _size + i] += - sourceUbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA]; + aUpper * ( sourceLayers[0] + ->getSymbolicLb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicUpperBias[i] += aUpper * ( sourceLayers[0]->getSymbolicLowerBias() )[0]; } - if ( sourceLbs[0] >= 0 ) + if ( bLower >= 0 ) { _symbolicLb[j * _size + i] += - sourceLbs[0] * sourceSymbolicLb[j * sourceLayerSize + indexB]; - _symbolicUb[j * _size + i] += - sourceLbs[0] * sourceSymbolicUb[j * sourceLayerSize + indexB]; + bLower * ( sourceLayers[1] + ->getSymbolicLb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bLower * ( sourceLayers[1]->getSymbolicLowerBias() )[1]; } else { _symbolicLb[j * _size + i] += - sourceLbs[0] * sourceSymbolicUb[j * sourceLayerSize + indexB]; + bLower * ( sourceLayers[1] + ->getSymbolicUb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bLower * ( sourceLayers[1]->getSymbolicUpperBias() )[1]; + } + + if ( bUpper >= 0 ) + { + _symbolicUb[j * _size + i] += + bUpper * ( sourceLayers[1] + ->getSymbolicUb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bUpper * ( sourceLayers[1]->getSymbolicUpperBias() )[1]; + } + else + { _symbolicUb[j * _size + i] += - sourceLbs[0] * sourceSymbolicLb[j * sourceLayerSize + indexB]; + bUpper * ( sourceLayers[1] + ->getSymbolicLb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bUpper * ( sourceLayers[1]->getSymbolicLowerBias() )[1]; } } - _symbolicLowerBias[i] = -sourceLbs[0] * sourceLbs[1]; - _symbolicUpperBias[i] = -sourceLbs[0] * sourceUbs[1]; double lb = FloatUtils::infinity(); double ub = FloatUtils::negativeInfinity(); @@ -3923,7 +3928,6 @@ void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( const Vector _symbolicUpperBias[i] *= weight; _symbolicUpperBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; - // For the lower bound, in general, x_f >= lambda * x_b, where // 0 <= lambda <= 1, would be a sound lower bound. 
We
// use the heuristic described in section 4.1 of
@@ -4096,27 +4100,26 @@ void Layer::computeParameterisedSymbolicBoundsForBilinear( const Vector<double>
         List<NeuronIndex> sources = getActivationSources( i );
         ASSERT( sources.size() == 2 );

-        const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer );
-
-        unsigned sourceLayerSize = sourceLayer->getSize();
-        const double *sourceSymbolicLb = sourceLayer->getSymbolicLb();
-        const double *sourceSymbolicUb = sourceLayer->getSymbolicUb();
-
         Vector<double> sourceLbs;
         Vector<double> sourceUbs;
         Vector<double> sourceValues;
+        Vector<unsigned> sourceNeurons;
+        Vector<unsigned> sourceLayerSizes;
+        Vector<const Layer *> sourceLayers;
         bool allConstant = true;
-        unsigned indexA = 0;
-        unsigned indexB = 0;
-        unsigned counter = 0;
         for ( const auto &sourceIndex : sources )
         {
+            const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
             unsigned sourceNeuron = sourceIndex._neuron;
             double sourceLb = sourceLayer->getLb( sourceNeuron );
             double sourceUb = sourceLayer->getUb( sourceNeuron );
+            unsigned sourceLayerSize = sourceLayer->getSize();
+            sourceLayers.append( sourceLayer );
+            sourceNeurons.append( sourceNeuron );
             sourceLbs.append( sourceLb );
             sourceUbs.append( sourceUb );
+            sourceLayerSizes.append( sourceLayerSize );

             if ( !sourceLayer->neuronEliminated( sourceNeuron ) )
             {
@@ -4127,16 +4130,6 @@ void Layer::computeParameterisedSymbolicBoundsForBilinear( const Vector<double>
                 double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron );
                 sourceValues.append( sourceValue );
             }
-
-            if ( counter == 0 )
-            {
-                indexA = sourceIndex._neuron;
-            }
-            else
-            {
-                indexB = sourceIndex._neuron;
-            }
-            ++counter;
         }

         if ( allConstant )
@@ -4175,58 +4168,74 @@ void Layer::computeParameterisedSymbolicBoundsForBilinear( const Vector<double>
             double bLower = coeffs[0] * sourceLbs[0] + ( 1 - coeffs[0] ) * sourceUbs[0];
             double bUpper = coeffs[1] * sourceLbs[0] + ( 1 - coeffs[1] ) * sourceUbs[0];

+            _symbolicLowerBias[i] = -coeffs[0] * sourceLbs[0] * sourceLbs[1] -
+                                    ( 1 - coeffs[0] ) * sourceUbs[0] * sourceUbs[1];
+            _symbolicUpperBias[i] = -coeffs[1] * sourceLbs[0] * sourceUbs[1] -
+                                    ( 1 - coeffs[1] ) * sourceUbs[0] * sourceLbs[1];
+
             for ( unsigned j = 0; j < _inputLayerSize; ++j )
             {
                 if ( aLower >= 0 )
                 {
                     _symbolicLb[j * _size + i] +=
-                        aLower * sourceSymbolicLb[j * sourceLayerSize + indexA];
+                        aLower * ( sourceLayers[0]
+                                       ->getSymbolicLb() )[j * sourceLayerSizes[0] + sourceNeurons[0]];
+                    _symbolicLowerBias[i] +=
+                        aLower * ( sourceLayers[0]->getSymbolicLowerBias() )[sourceNeurons[0]];
                 }
                 else
                 {
                     _symbolicLb[j * _size + i] +=
-                        aLower * sourceSymbolicUb[j * sourceLayerSize + indexA];
+                        aLower * ( sourceLayers[0]
+                                       ->getSymbolicUb() )[j * sourceLayerSizes[0] + sourceNeurons[0]];
+                    _symbolicLowerBias[i] +=
+                        aLower * ( sourceLayers[0]->getSymbolicUpperBias() )[sourceNeurons[0]];
                 }
                 if ( aUpper >= 0 )
                 {
                     _symbolicUb[j * _size + i] +=
-                        aUpper * sourceSymbolicUb[j * sourceLayerSize + indexA];
+                        aUpper * ( sourceLayers[0]
+                                       ->getSymbolicUb() )[j * sourceLayerSizes[0] + sourceNeurons[0]];
+                    _symbolicUpperBias[i] +=
+                        aUpper * ( sourceLayers[0]->getSymbolicUpperBias() )[sourceNeurons[0]];
                 }
                 else
                 {
                     _symbolicUb[j * _size + i] +=
-                        aUpper * sourceSymbolicLb[j * sourceLayerSize + indexA];
+                        aUpper * ( sourceLayers[0]
+                                       ->getSymbolicLb() )[j * sourceLayerSizes[0] + sourceNeurons[0]];
+                    _symbolicUpperBias[i] +=
+                        aUpper * ( sourceLayers[0]->getSymbolicLowerBias() )[sourceNeurons[0]];
                 }
                 if ( bLower >= 0 )
                 {
                     _symbolicLb[j * _size + i] +=
-                        bLower * sourceSymbolicLb[j * sourceLayerSize + indexB];
+                        bLower * ( sourceLayers[1]
+                                       ->getSymbolicLb() )[j * sourceLayerSizes[1] + sourceNeurons[1]];
+                    _symbolicLowerBias[i] +=
+                        bLower * ( sourceLayers[1]->getSymbolicLowerBias() )[sourceNeurons[1]];
                 }
                 else
                 {
                     _symbolicLb[j * _size + i] +=
-                        bLower * sourceSymbolicUb[j * sourceLayerSize + indexB];
+                        bLower * ( sourceLayers[1]
+                                       ->getSymbolicUb() )[j * sourceLayerSizes[1] + sourceNeurons[1]];
+                    _symbolicLowerBias[i] +=
+                        bLower * ( sourceLayers[1]->getSymbolicUpperBias() )[sourceNeurons[1]];
                 }
-                if ( bLower >= 0 )
+                if ( bUpper >= 0 )
                 {
                     _symbolicUb[j * _size + i] +=
-                        bUpper * sourceSymbolicUb[j * sourceLayerSize + indexB];
+                        bUpper * ( sourceLayers[1]
+                                       ->getSymbolicUb() )[j * sourceLayerSizes[1] + sourceNeurons[1]];
+                    _symbolicUpperBias[i] +=
+                        bUpper * ( sourceLayers[1]->getSymbolicUpperBias() )[sourceNeurons[1]];
                 }
                 else
                 {
                     _symbolicUb[j * _size + i] +=
-                        bUpper * sourceSymbolicLb[j * sourceLayerSize + indexB];
+                        bUpper * ( sourceLayers[1]
+                                       ->getSymbolicLb() )[j * sourceLayerSizes[1] + sourceNeurons[1]];
+                    _symbolicUpperBias[i] +=
+                        bUpper * ( sourceLayers[1]->getSymbolicLowerBias() )[sourceNeurons[1]];
                 }
             }

-            _symbolicLowerBias[i] = -coeffs[0] * sourceLbs[0] * sourceLbs[1] -
-                                    ( 1 - coeffs[0] ) * sourceUbs[0] * sourceUbs[1];
-            _symbolicUpperBias[i] = -coeffs[1] * sourceLbs[0] * sourceUbs[1] -
-                                    ( 1 - coeffs[1] ) * sourceUbs[0] * sourceLbs[1];
-
             double lb = FloatUtils::infinity();
             double ub = FloatUtils::negativeInfinity();
             List<double> values = { sourceLbs[0] * sourceLbs[1],
@@ -4587,7 +4596,6 @@ double Layer::dERUpperBound( const Vector<double> &inputMids,
     double li = outputLb[i];
     double ui = outputUb[i];

-
     if ( i == di )
     {
         double val2 = -1;
@@ -4634,6 +4642,256 @@ void Layer::eliminateVariable( unsigned variable, double value )
     _variableToNeuron.erase( variable );
 }

+const Vector<NeuronIndex> Layer::getNonfixedNeurons() const
+{
+    Vector<NeuronIndex> nonfixedNeurons = Vector<NeuronIndex>( {} );
+    for ( unsigned i = 0; i < _size; ++i )
+    {
+        if ( neuronNonfixed( i ) )
+        {
+            nonfixedNeurons.append( NeuronIndex( _layerIndex, i ) );
+        }
+    }
+    const Vector<NeuronIndex> neuronList = Vector<NeuronIndex>( nonfixedNeurons );
+    return neuronList;
+}
+
+bool Layer::neuronNonfixed( unsigned neuron ) const
+{
+    if ( _eliminatedNeurons.exists( neuron ) )
+    {
+        return false;
+    }
+
+    switch ( _type )
+    {
+    case INPUT:
+    case WEIGHTED_SUM:
+    {
+        return false;
+        break;
+    }
+    case RELU:
+    case LEAKY_RELU:
+    case SIGN:
+    case ABSOLUTE_VALUE:
+    {
+        return neuronNonfixedAtZero( neuron );
+        break;
+    }
+    case SIGMOID:
+    {
+        return neuronNonfixedSigmoid( neuron );
+        break;
+    }
+    case Layer::ROUND:
+    {
+        return neuronNonfixedRound( neuron );
+        break;
+    }
+    case MAX:
+    {
+        return neuronNonfixedMax( neuron );
+        break;
+    }
+    case SOFTMAX:
+    {
+        return neuronNonfixedSoftmax( neuron );
+        break;
+    }
+    case BILINEAR:
+    {
+        return neuronNonfixedBilinear( neuron );
+        break;
+    }
+    default:
+    {
+        printf( "Error! Neuron type %u unsupported\n", _type );
+        throw MarabouError( MarabouError::NETWORK_LEVEL_REASONER_ACTIVATION_NOT_SUPPORTED );
+        break;
+    }
+    }
+}
+
+bool Layer::neuronNonfixedAtZero( unsigned neuron ) const
+{
+    // A Relu/Sign/Abs/Leaky Relu activation is non-fixed if it's not externally fixed,
+    // its source neuron isn't externally fixed, and sourceLb < 0 < sourceUb.
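For intuition, the zero-crossing test stated in the comment above can be seen in isolation. The following is a minimal standalone sketch, using plain doubles instead of Marabou's FloatUtils; isPhaseNonfixedAtZero is a hypothetical helper for illustration, not part of the Layer API.

#include <cassert>

// Non-fixed exactly when the source interval straddles zero.
static bool isPhaseNonfixedAtZero( double sourceLb, double sourceUb )
{
    return sourceLb < 0.0 && sourceUb > 0.0;
}

int main()
{
    assert( isPhaseNonfixedAtZero( -1.0, 2.0 ) );   // straddles zero: ReLU phase unknown
    assert( !isPhaseNonfixedAtZero( 0.5, 2.0 ) );   // active phase fixed
    assert( !isPhaseNonfixedAtZero( -2.0, -0.5 ) ); // inactive phase fixed
    return 0;
}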
+    NeuronIndex sourceIndex = *_neuronToActivationSources[neuron].begin();
+    const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
+    if ( sourceLayer->neuronEliminated( sourceIndex._neuron ) )
+    {
+        return false;
+    }
+
+    double sourceLb = sourceLayer->getLb( sourceIndex._neuron );
+    double sourceUb = sourceLayer->getUb( sourceIndex._neuron );
+    return FloatUtils::isNegative( sourceLb ) && FloatUtils::isPositive( sourceUb );
+}
+
+bool Layer::neuronNonfixedSigmoid( unsigned neuron ) const
+{
+    // A Sigmoid activation is non-fixed if it's not externally fixed,
+    // its source neuron isn't externally fixed, and sourceUb != sourceLb.
+    NeuronIndex sourceIndex = *_neuronToActivationSources[neuron].begin();
+    const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
+    if ( sourceLayer->neuronEliminated( sourceIndex._neuron ) )
+    {
+        return false;
+    }
+
+    double sourceLb = sourceLayer->getLb( sourceIndex._neuron );
+    double sourceUb = sourceLayer->getUb( sourceIndex._neuron );
+    return !FloatUtils::areEqual( sourceLb, sourceUb );
+}
+
+bool Layer::neuronNonfixedRound( unsigned neuron ) const
+{
+    // A Round activation is non-fixed if it's not externally fixed,
+    // its source neuron isn't externally fixed, and round( sourceUb ) != round( sourceLb ).
+    NeuronIndex sourceIndex = *_neuronToActivationSources[neuron].begin();
+    const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
+    if ( sourceLayer->neuronEliminated( sourceIndex._neuron ) )
+    {
+        return false;
+    }
+
+    double sourceLb = sourceLayer->getLb( sourceIndex._neuron );
+    double sourceUb = sourceLayer->getUb( sourceIndex._neuron );
+    return !FloatUtils::areEqual( FloatUtils::round( sourceUb ), FloatUtils::round( sourceLb ) );
+}
+
+bool Layer::neuronNonfixedMax( unsigned neuron ) const
+{
+    // A Max activation is non-fixed if not all its sources have a fixed value and no
+    // source's lower bound is larger than the upper bounds of all the other sources.
+    List<NeuronIndex> sources = getActivationSources( neuron );
+    NeuronIndex indexOfMaxLowerBound = *( sources.begin() );
+    double maxLowerBound = FloatUtils::negativeInfinity();
+    double maxUpperBound = FloatUtils::negativeInfinity();
+    Map<NeuronIndex, double> sourceUbs;
+    bool allConstant = true;
+    for ( const auto &sourceIndex : sources )
+    {
+        const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
+        unsigned sourceNeuron = sourceIndex._neuron;
+        if ( !sourceLayer->neuronEliminated( sourceIndex._neuron ) )
+        {
+            allConstant = false;
+            double sourceLb = sourceLayer->getLb( sourceNeuron );
+            double sourceUb = sourceLayer->getUb( sourceNeuron );
+            sourceUbs[sourceIndex] = sourceUb;
+            if ( maxLowerBound < sourceLb )
+            {
+                indexOfMaxLowerBound = sourceIndex;
+                maxLowerBound = sourceLb;
+            }
+            if ( maxUpperBound < sourceUb )
+            {
+                maxUpperBound = sourceUb;
+            }
+        }
+    }
+
+    if ( allConstant )
+    {
+        return false;
+    }
+
+    bool phaseFixed = true;
+    for ( const auto &sourceIndex : sources )
+    {
+        if ( sourceIndex != indexOfMaxLowerBound &&
+             FloatUtils::gt( sourceUbs[sourceIndex], maxLowerBound ) )
+        {
+            phaseFixed = false;
+            break;
+        }
+    }
+    return !phaseFixed;
+}
+
+bool Layer::neuronNonfixedSoftmax( unsigned neuron ) const
+{
+    // A Softmax activation is non-fixed if not all its sources have a fixed value
+    // and its own concrete output bounds satisfy lb != ub.
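The Max rule implemented above can be illustrated standalone: the phase is fixed exactly when the source with the largest lower bound dominates every other source's upper bound. A minimal sketch under that assumption, with plain std containers standing in for Marabou's types:

#include <cassert>
#include <utility>
#include <vector>

// bounds[i] = (lb, ub) of source i; returns true when the Max phase is not fixed.
static bool maxPhaseNonfixed( const std::vector<std::pair<double, double>> &bounds )
{
    size_t best = 0;
    for ( size_t i = 1; i < bounds.size(); ++i )
        if ( bounds[i].first > bounds[best].first )
            best = i;
    for ( size_t i = 0; i < bounds.size(); ++i )
        if ( i != best && bounds[i].second > bounds[best].first )
            return true; // some competitor can still exceed the leader
    return false;
}

int main()
{
    assert( !maxPhaseNonfixed( { { 3, 4 }, { 0, 2 } } ) ); // first source always wins
    assert( maxPhaseNonfixed( { { 1, 4 }, { 0, 2 } } ) );  // overlap: phase unknown
    return 0;
}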
+    List<NeuronIndex> sources = getActivationSources( neuron );
+    Vector<double> sourceLbs;
+    Vector<double> sourceUbs;
+    bool allConstant = true;
+    for ( const auto &sourceIndex : sources )
+    {
+        const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
+        unsigned sourceNeuron = sourceIndex._neuron;
+        if ( !sourceLayer->neuronEliminated( sourceIndex._neuron ) )
+        {
+            allConstant = false;
+            double sourceLb = sourceLayer->getLb( sourceNeuron );
+            double sourceUb = sourceLayer->getUb( sourceNeuron );
+            sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS );
+            sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS );
+        }
+    }
+
+    if ( allConstant )
+    {
+        return false;
+    }
+
+    unsigned selfIndex = 0;
+    Set<unsigned> handledInputNeurons;
+    for ( unsigned i = 0; i < neuron; ++i )
+    {
+        for ( const auto &sourceIndex : getActivationSources( i ) )
+        {
+            if ( !handledInputNeurons.exists( sourceIndex._neuron ) )
+            {
+                handledInputNeurons.insert( sourceIndex._neuron );
+                break;
+            }
+        }
+    }
+    for ( const auto &sourceIndex : sources )
+    {
+        if ( handledInputNeurons.exists( sourceIndex._neuron ) )
+        {
+            ++selfIndex;
+        }
+        else
+        {
+            break;
+        }
+    }
+
+    double lb = std::max( Layer::linearLowerBound( sourceLbs, sourceUbs, selfIndex ), _lb[neuron] );
+    double ub = std::min( Layer::linearUpperBound( sourceLbs, sourceUbs, selfIndex ), _ub[neuron] );
+
+    return !FloatUtils::areEqual( lb, ub );
+}
+
+bool Layer::neuronNonfixedBilinear( unsigned neuron ) const
+{
+    // A Bilinear activation is non-fixed if none of its source neurons has a fixed value.
+    List<NeuronIndex> sources = getActivationSources( neuron );
+    for ( const auto &sourceIndex : sources )
+    {
+        const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer );
+        unsigned sourceNeuron = sourceIndex._neuron;
+        if ( sourceLayer->neuronEliminated( sourceIndex._neuron ) )
+        {
+            return false;
+        }
+
+        double sourceLb = sourceLayer->getLb( sourceNeuron );
+        double sourceUb = sourceLayer->getUb( sourceNeuron );
+        if ( FloatUtils::areEqual( sourceLb, sourceUb ) )
+        {
+            return false;
+        }
+    }
+    return true;
+}
+
 void Layer::updateVariableIndices( const Map<unsigned, unsigned> &oldIndexToNewIndex,
                                    const Map<unsigned, unsigned> &mergedVariables )
 {
diff --git a/src/nlr/Layer.h b/src/nlr/Layer.h
index cfff68e8a6..f2ec9f5d42 100644
--- a/src/nlr/Layer.h
+++ b/src/nlr/Layer.h
@@ -2,7 +2,7 @@
 /*! \file Layer.h
  ** \verbatim
  ** Top contributors (to current version):
- **   Guy Katz, Ido Shmuel
+ **   Guy Katz
  ** This file is part of the Marabou project.
  ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS
  ** in the top-level source directory) and their institutional affiliations.
@@ -140,6 +140,12 @@ class Layer
     void computeSymbolicBounds();
     void computeParameterisedSymbolicBounds( const Vector<double> &coeffs, bool receive = false );
 
+    // Get all non-fixed neurons in a single layer.
+    const Vector<NeuronIndex> getNonfixedNeurons() const;
+
+    // Determine whether a given non-linear activation neuron has a non-fixed phase.
+    bool neuronNonfixed( unsigned neuron ) const;
+
     const double *getSymbolicLb() const;
     const double *getSymbolicUb() const;
     const double *getSymbolicLowerBias() const;
@@ -309,6 +315,16 @@ class Layer
     void computeIntervalArithmeticBoundsForSoftmax();
     void computeIntervalArithmeticBoundsForBilinear();
 
+    /*
+      Helper functions for determining whether a given non-linear activation has a non-fixed phase.
+ */ + bool neuronNonfixedAtZero( unsigned neuron ) const; + bool neuronNonfixedSigmoid( unsigned neuron ) const; + bool neuronNonfixedRound( unsigned neuron ) const; + bool neuronNonfixedMax( unsigned neuron ) const; + bool neuronNonfixedSoftmax( unsigned neuron ) const; + bool neuronNonfixedBilinear( unsigned neuron ) const; + double getSymbolicLbOfLb( unsigned neuron ) const; double getSymbolicUbOfLb( unsigned neuron ) const; double getSymbolicLbOfUb( unsigned neuron ) const; diff --git a/src/nlr/LayerOwner.h b/src/nlr/LayerOwner.h index b8ae03cb0f..99d08743b3 100644 --- a/src/nlr/LayerOwner.h +++ b/src/nlr/LayerOwner.h @@ -36,7 +36,7 @@ class LayerOwner virtual const ITableau *getTableau() const = 0; virtual unsigned getNumberOfLayers() const = 0; virtual void receiveTighterBound( Tightening tightening ) = 0; - virtual void receivePolygonalTighterBound( PolygonalTightening polygonal_tightening ) = 0; + virtual void receivePolygonalTightening( PolygonalTightening &polygonalTightening ) = 0; }; } // namespace NLR diff --git a/src/nlr/NLRError.h b/src/nlr/NLRError.h index 69016ea8c1..18aca963ce 100644 --- a/src/nlr/NLRError.h +++ b/src/nlr/NLRError.h @@ -28,7 +28,7 @@ class NLRError : public Error LEAKY_RELU_SLOPES_NOT_UNIFORM = 3, RELU_NOT_FOUND = 4, LAYER_NOT_FOUND = 5, - NEURON_NOT_FOUND = 5, + NEURON_NOT_FOUND = 6, }; NLRError( NLRError::Code code ) diff --git a/src/nlr/NetworkLevelReasoner.cpp b/src/nlr/NetworkLevelReasoner.cpp index 42c2af69b1..d5bfbe6c73 100644 --- a/src/nlr/NetworkLevelReasoner.cpp +++ b/src/nlr/NetworkLevelReasoner.cpp @@ -2,7 +2,7 @@ /*! \file NetworkLevelReasoner.cpp ** \verbatim ** Top contributors (to current version): - ** Guy Katz + ** Guy Katz, Ido Shmuel ** This file is part of the Marabou project. ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS ** in the top-level source directory) and their institutional affiliations. 
@@ -202,23 +202,41 @@ void NetworkLevelReasoner::clearConstraintTightenings()
     _boundTightenings.clear();
 }
 
-void NetworkLevelReasoner::receivePolygonalTighterBound( PolygonalTightening polygonalTightening )
+void NetworkLevelReasoner::receivePolygonalTightening( PolygonalTightening &polygonalTightening )
 {
     _polygonalBoundTightenings.append( polygonalTightening );
 }
 
-void NetworkLevelReasoner::getConstraintPolygonalTightenings(
+void NetworkLevelReasoner::getPolygonalTightenings(
     List<PolygonalTightening> &polygonalTightenings )
 {
     polygonalTightenings = _polygonalBoundTightenings;
     _polygonalBoundTightenings.clear();
 }
 
-void NetworkLevelReasoner::clearConstraintPolygonalTightenings()
+void NetworkLevelReasoner::clearPolygonalTightenings()
 {
     _polygonalBoundTightenings.clear();
 }
 
+void NetworkLevelReasoner::receiveInfeasibleBranches(
+    Map<NeuronIndex, unsigned> &neuronToBranchIndex )
+{
+    _infeasibleBranches.append( neuronToBranchIndex );
+}
+
+void NetworkLevelReasoner::getInfeasibleBranches(
+    List<Map<NeuronIndex, unsigned>> &infeasibleBranches )
+{
+    infeasibleBranches = _infeasibleBranches;
+    _infeasibleBranches.clear();
+}
+
+void NetworkLevelReasoner::clearInfeasibleBranches()
+{
+    _infeasibleBranches.clear();
+}
+
 void NetworkLevelReasoner::symbolicBoundPropagation()
 {
     for ( unsigned i = 0; i < _layerIndexToLayer.size(); ++i )
@@ -242,6 +260,127 @@ void NetworkLevelReasoner::deepPolyPropagation()
     _deepPolyAnalysis->run();
 }
 
+void NetworkLevelReasoner::parameterisedDeepPoly( bool storeSymbolicBounds,
+                                                  const Vector<double> &coeffs )
+{
+    if ( !storeSymbolicBounds )
+    {
+        bool useParameterisedSBT = coeffs.size() > 0;
+        Map<unsigned, Vector<double>> layerIndicesToParameters =
+            Map<unsigned, Vector<double>>( {} );
+        if ( useParameterisedSBT )
+        {
+            layerIndicesToParameters = getParametersForLayers( coeffs );
+        }
+
+        _deepPolyAnalysis =
+            std::unique_ptr<DeepPolyAnalysis>( new DeepPolyAnalysis( this,
+                                                                     storeSymbolicBounds,
+                                                                     storeSymbolicBounds,
+                                                                     useParameterisedSBT,
+                                                                     &layerIndicesToParameters ) );
+
+        // Clear deepPolyAnalysis pointer after running.
+        _deepPolyAnalysis->run();
+        _deepPolyAnalysis = nullptr;
+    }
+    else
+    {
+        // Clear the previous symbolic bound maps.
+        _outputSymbolicLb.clear();
+        _outputSymbolicUb.clear();
+        _outputSymbolicLowerBias.clear();
+        _outputSymbolicUpperBias.clear();
+
+        _predecessorSymbolicLb.clear();
+        _predecessorSymbolicUb.clear();
+        _predecessorSymbolicLowerBias.clear();
+        _predecessorSymbolicUpperBias.clear();
+
+        // Temporarily add a weighted sum layer of the same size as the output layer to the NLR.
+        Layer *outputLayer = _layerIndexToLayer[getNumberOfLayers() - 1];
+        unsigned outputLayerIndex = outputLayer->getLayerIndex();
+        unsigned outputLayerSize = outputLayer->getSize();
+        unsigned newLayerIndex = outputLayerIndex + 1;
+
+        addLayer( newLayerIndex, Layer::WEIGHTED_SUM, outputLayerSize );
+        addLayerDependency( outputLayerIndex, newLayerIndex );
+        Layer *newLayer = _layerIndexToLayer[newLayerIndex];
+
+        for ( unsigned i = 0; i < outputLayerSize; ++i )
+        {
+            setWeight( outputLayerIndex, i, newLayerIndex, i, 1 );
+            newLayer->setLb( i, FloatUtils::infinity() );
+            newLayer->setUb( i, FloatUtils::negativeInfinity() );
+        }
+
+        // Initialize maps with zero vectors.
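The temporary layer added above is an identity weighted-sum layer: it leaves the network's function unchanged, so the symbolic bounds computed for it are exactly the output layer's bounds. A toy illustration of that identity construction; the sizes, the row-major layout, and exact double comparison are assumptions of this sketch, not Marabou's internals.

#include <cassert>
#include <vector>

int main()
{
    const unsigned n = 3; // hypothetical output layer size
    std::vector<double> identity( n * n, 0.0 );
    for ( unsigned i = 0; i < n; ++i )
        identity[i * n + i] = 1.0; // mirrors setWeight( out, i, new, i, 1 ) above

    // y = identity * x reproduces x exactly.
    std::vector<double> x = { 0.5, -1.0, 2.0 }, y( n, 0.0 );
    for ( unsigned i = 0; i < n; ++i )
        for ( unsigned j = 0; j < n; ++j )
            y[i] += identity[i * n + j] * x[j];
    for ( unsigned i = 0; i < n; ++i )
        assert( y[i] == x[i] );
    return 0;
}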
+        for ( const auto &pair : _layerIndexToLayer )
+        {
+            unsigned layerIndex = pair.first;
+            Layer *layer = pair.second;
+            unsigned size = layer->getSize();
+            Layer::Type layerType = layer->getLayerType();
+
+            _outputSymbolicLb[layerIndex] = Vector<double>( outputLayerSize * size, 0 );
+            _outputSymbolicUb[layerIndex] = Vector<double>( outputLayerSize * size, 0 );
+            _outputSymbolicLowerBias[layerIndex] = Vector<double>( outputLayerSize, 0 );
+            _outputSymbolicUpperBias[layerIndex] = Vector<double>( outputLayerSize, 0 );
+
+            if ( layerType != Layer::WEIGHTED_SUM && layerType != Layer::INPUT )
+            {
+                unsigned maxSourceSize = 0;
+                for ( unsigned i = 0; i < size; ++i )
+                {
+                    unsigned sourceSize = layer->getActivationSources( i ).size();
+                    maxSourceSize = sourceSize > maxSourceSize ? sourceSize : maxSourceSize;
+                }
+                _predecessorSymbolicLb[layerIndex] = Vector<double>( size * maxSourceSize, 0 );
+                _predecessorSymbolicUb[layerIndex] = Vector<double>( size * maxSourceSize, 0 );
+                _predecessorSymbolicLowerBias[layerIndex] = Vector<double>( size, 0 );
+                _predecessorSymbolicUpperBias[layerIndex] = Vector<double>( size, 0 );
+            }
+        }
+
+        // Populate symbolic bounds maps via DeepPoly.
+        bool useParameterisedSBT = coeffs.size() > 0;
+        Map<unsigned, Vector<double>> layerIndicesToParameters =
+            Map<unsigned, Vector<double>>( {} );
+        if ( useParameterisedSBT )
+        {
+            layerIndicesToParameters = getParametersForLayers( coeffs );
+        }
+
+        _deepPolyAnalysis = std::unique_ptr<DeepPolyAnalysis>(
+            new DeepPolyAnalysis( this,
+                                  storeSymbolicBounds,
+                                  storeSymbolicBounds,
+                                  useParameterisedSBT,
+                                  &layerIndicesToParameters,
+                                  &_outputSymbolicLb,
+                                  &_outputSymbolicUb,
+                                  &_outputSymbolicLowerBias,
+                                  &_outputSymbolicUpperBias,
+                                  &_predecessorSymbolicLb,
+                                  &_predecessorSymbolicUb,
+                                  &_predecessorSymbolicLowerBias,
+                                  &_predecessorSymbolicUpperBias ) );
+
+        // Clear deepPolyAnalysis pointer after running.
+        _deepPolyAnalysis->run();
+        _deepPolyAnalysis = nullptr;
+
+        // Remove new weighted sum layer.
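The per-layer vectors allocated above are flattened matrices. The indexing sketch below is inferred from how getPredecessorSymbolicLb is read later in this patch (offset size * inputIndex + neuron); flatIndex is a hypothetical helper, and the layout is an assumption of this illustration.

#include <cassert>

// Coefficient for source slot `inputIndex` of `neuron` in a layer of width `size`.
static unsigned flatIndex( unsigned inputIndex, unsigned neuron, unsigned size )
{
    return inputIndex * size + neuron; // one row of width `size` per source slot
}

int main()
{
    // A layer of width 3 with 2 source slots stores 2 * 3 coefficients.
    assert( flatIndex( 0, 2, 3 ) == 2 );
    assert( flatIndex( 1, 0, 3 ) == 3 );
    return 0;
}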
+ removeLayerDependency( outputLayerIndex, newLayerIndex ); + _layerIndexToLayer.erase( newLayerIndex ); + if ( newLayer ) + { + delete newLayer; + newLayer = NULL; + } + } +} + void NetworkLevelReasoner::lpRelaxationPropagation() { LPFormulator lpFormulator( this ); @@ -254,7 +393,15 @@ void NetworkLevelReasoner::lpRelaxationPropagation() lpFormulator.optimizeBoundsWithLpRelaxation( _layerIndexToLayer, true ); else if ( Options::get()->getMILPSolverBoundTighteningType() == MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX ) - optimizeBoundsWithPreimageApproximation( lpFormulator ); + { + const Vector optimalCoeffs = OptimalParameterisedSymbolicBoundTightening(); + Map> layerIndicesToParameters = + getParametersForLayers( optimalCoeffs ); + lpFormulator.optimizeBoundsWithLpRelaxation( + _layerIndexToLayer, false, layerIndicesToParameters ); + lpFormulator.optimizeBoundsWithLpRelaxation( + _layerIndexToLayer, true, layerIndicesToParameters ); + } else if ( Options::get()->getMILPSolverBoundTighteningType() == MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP || Options::get()->getMILPSolverBoundTighteningType() == @@ -263,7 +410,17 @@ void NetworkLevelReasoner::lpRelaxationPropagation() MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT || Options::get()->getMILPSolverBoundTighteningType() == MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS ) - optimizeBoundsWithPMNR( lpFormulator ); + { + const Vector &polygonalTightenings = + OptimizeParameterisedPolygonalTightening(); + unsigned parameterCount = getNumberOfParameters(); + const Vector &coeffs = Vector( parameterCount, 0 ); + Map> layerIndicesToParameters = getParametersForLayers( coeffs ); + lpFormulator.optimizeBoundsWithLpRelaxation( + _layerIndexToLayer, false, layerIndicesToParameters, polygonalTightenings ); + lpFormulator.optimizeBoundsWithLpRelaxation( + _layerIndexToLayer, true, layerIndicesToParameters, polygonalTightenings ); + } else if ( Options::get()->getMILPSolverBoundTighteningType() == MILPSolverBoundTighteningType::LP_RELAXATION ) lpFormulator.optimizeBoundsWithLpRelaxation( _layerIndexToLayer ); @@ -272,30 +429,6 @@ void NetworkLevelReasoner::lpRelaxationPropagation() lpFormulator.optimizeBoundsWithIncrementalLpRelaxation( _layerIndexToLayer ); } -void NetworkLevelReasoner::optimizeBoundsWithPreimageApproximation( LPFormulator &lpFormulator ) -{ - const Vector &optimalCoeffs = OptimalParameterisedSymbolicBoundTightening(); - Map> layerIndicesToParameters = - getParametersForLayers( optimalCoeffs ); - lpFormulator.optimizeBoundsWithLpRelaxation( - _layerIndexToLayer, false, layerIndicesToParameters ); - lpFormulator.optimizeBoundsWithLpRelaxation( - _layerIndexToLayer, true, layerIndicesToParameters ); -} - -void NetworkLevelReasoner::optimizeBoundsWithPMNR( LPFormulator &lpFormulator ) -{ - const Vector &polygonalTightenings = - OptimizeParameterisedPolygonalTightening(); - unsigned parameterCount = getNumberOfParameters(); - const Vector &coeffs = Vector( parameterCount, 0 ); - Map> layerIndicesToParameters = getParametersForLayers( coeffs ); - lpFormulator.optimizeBoundsWithLpRelaxation( - _layerIndexToLayer, false, layerIndicesToParameters, polygonalTightenings ); - lpFormulator.optimizeBoundsWithLpRelaxation( - _layerIndexToLayer, true, layerIndicesToParameters, polygonalTightenings ); -} - void NetworkLevelReasoner::LPTighteningForOneLayer( unsigned targetIndex ) { LPFormulator lpFormulator( this ); @@ -737,12 +870,12 @@ double NetworkLevelReasoner::getPreviousBias( 
const ReluConstraint *reluConstraint ) const
 {
     return _previousBiases[reluConstraint];
 }
 
-Vector<double> NetworkLevelReasoner::getOutputSymbolicLb( unsigned layerIndex ) const
+Vector<double> NetworkLevelReasoner::getOutputSymbolicLb( unsigned layerIndex )
 {
     // Initialize map if empty.
     if ( _outputSymbolicLb.empty() )
     {
-        const_cast<NetworkLevelReasoner *>( this )->initializeSymbolicBoundsMaps();
+        parameterisedDeepPoly( true );
     }
 
     if ( !_outputSymbolicLb.exists( layerIndex ) )
@@ -754,12 +887,12 @@ Vector<double> NetworkLevelReasoner::getOutputSymbolicLb( unsigned layerIndex )
     return _outputSymbolicLb[layerIndex];
 }
 
-Vector<double> NetworkLevelReasoner::getOutputSymbolicUb( unsigned layerIndex ) const
+Vector<double> NetworkLevelReasoner::getOutputSymbolicUb( unsigned layerIndex )
 {
     // Initialize map if empty.
     if ( _outputSymbolicUb.empty() )
     {
-        const_cast<NetworkLevelReasoner *>( this )->initializeSymbolicBoundsMaps();
+        parameterisedDeepPoly( true );
     }
 
     if ( !_outputSymbolicUb.exists( layerIndex ) )
@@ -771,12 +904,12 @@ Vector<double> NetworkLevelReasoner::getOutputSymbolicUb( unsigned layerIndex )
     return _outputSymbolicUb[layerIndex];
 }
 
-Vector<double> NetworkLevelReasoner::getOutputSymbolicLowerBias( unsigned layerIndex ) const
+Vector<double> NetworkLevelReasoner::getOutputSymbolicLowerBias( unsigned layerIndex )
 {
     // Initialize map if empty.
     if ( _outputSymbolicLowerBias.empty() )
     {
-        const_cast<NetworkLevelReasoner *>( this )->initializeSymbolicBoundsMaps();
+        parameterisedDeepPoly( true );
     }
 
     if ( !_outputSymbolicLowerBias.exists( layerIndex ) )
@@ -788,12 +921,12 @@ Vector<double> NetworkLevelReasoner::getOutputSymbolicLowerBias( unsigned layerI
     return _outputSymbolicLowerBias[layerIndex];
 }
 
-Vector<double> NetworkLevelReasoner::getOutputSymbolicUpperBias( unsigned layerIndex ) const
+Vector<double> NetworkLevelReasoner::getOutputSymbolicUpperBias( unsigned layerIndex )
 {
     // Initialize map if empty.
     if ( _outputSymbolicUpperBias.empty() )
     {
-        const_cast<NetworkLevelReasoner *>( this )->initializeSymbolicBoundsMaps();
+        parameterisedDeepPoly( true );
     }
 
     if ( !_outputSymbolicUpperBias.exists( layerIndex ) )
@@ -805,12 +938,12 @@ Vector<double> NetworkLevelReasoner::getOutputSymbolicUpperBias( unsigned layerI
     return _outputSymbolicUpperBias[layerIndex];
 }
 
-Vector<double> NetworkLevelReasoner::getPredecessorSymbolicLb( unsigned layerIndex ) const
+Vector<double> NetworkLevelReasoner::getPredecessorSymbolicLb( unsigned layerIndex )
 {
     // Initialize map if empty.
     if ( _predecessorSymbolicLb.empty() )
     {
-        const_cast<NetworkLevelReasoner *>( this )->initializeSymbolicBoundsMaps();
+        parameterisedDeepPoly( true );
    }
 
     if ( !_predecessorSymbolicLb.exists( layerIndex ) )
@@ -822,12 +955,12 @@ Vector<double> NetworkLevelReasoner::getPredecessorSymbolicLb( unsigned layerInd
     return _predecessorSymbolicLb[layerIndex];
 }
 
-Vector<double> NetworkLevelReasoner::getPredecessorSymbolicUb( unsigned layerIndex ) const
+Vector<double> NetworkLevelReasoner::getPredecessorSymbolicUb( unsigned layerIndex )
 {
     // Initialize map if empty.
     if ( _predecessorSymbolicUb.empty() )
     {
-        const_cast<NetworkLevelReasoner *>( this )->initializeSymbolicBoundsMaps();
+        parameterisedDeepPoly( true );
     }
 
     if ( !_predecessorSymbolicUb.exists( layerIndex ) )
@@ -839,12 +972,12 @@ Vector<double> NetworkLevelReasoner::getPredecessorSymbolicUb( unsigned layerInd
     return _predecessorSymbolicUb[layerIndex];
 }
 
-Vector<double> NetworkLevelReasoner::getPredecessorSymbolicLowerBias( unsigned layerIndex ) const
+Vector<double> NetworkLevelReasoner::getPredecessorSymbolicLowerBias( unsigned layerIndex )
 {
     // Initialize map if empty.
if ( _predecessorSymbolicLowerBias.empty() ) { - const_cast( this )->initializeSymbolicBoundsMaps(); + parameterisedDeepPoly( true ); } if ( !_predecessorSymbolicLowerBias.exists( layerIndex ) ) @@ -856,12 +989,12 @@ Vector NetworkLevelReasoner::getPredecessorSymbolicLowerBias( unsigned l return _predecessorSymbolicLowerBias[layerIndex]; } -Vector NetworkLevelReasoner::getPredecessorSymbolicUpperBias( unsigned layerIndex ) const +Vector NetworkLevelReasoner::getPredecessorSymbolicUpperBias( unsigned layerIndex ) { // Initialize map if empty. if ( _predecessorSymbolicUpperBias.empty() ) { - const_cast( this )->initializeSymbolicBoundsMaps(); + parameterisedDeepPoly( true ); } if ( !_predecessorSymbolicUpperBias.exists( layerIndex ) ) @@ -873,12 +1006,12 @@ Vector NetworkLevelReasoner::getPredecessorSymbolicUpperBias( unsigned l return _predecessorSymbolicUpperBias[layerIndex]; } -double NetworkLevelReasoner::getPMNRScore( NeuronIndex index ) const +double NetworkLevelReasoner::getPMNRScore( NeuronIndex index ) { // Initialize map if empty. if ( _neuronToPMNRScores.empty() ) { - const_cast( this )->initializePMNRScoreMap(); + initializePMNRScoreMap(); } if ( !_neuronToPMNRScores.exists( index ) ) @@ -889,12 +1022,12 @@ double NetworkLevelReasoner::getPMNRScore( NeuronIndex index ) const return _neuronToPMNRScores[index]; } -Map NetworkLevelReasoner::getBBPSBranchingPoint( NeuronIndex index ) const +std::pair NetworkLevelReasoner::getBBPSBranchingPoint( NeuronIndex index ) { // Initialize map if empty. if ( _neuronToBBPSBranchingPoints.empty() ) { - const_cast( this )->initializeBBPSBranchingMap(); + initializeBBPSBranchingMaps(); } if ( !_neuronToBBPSBranchingPoints.exists( index ) ) @@ -906,6 +1039,74 @@ Map NetworkLevelReasoner::getBBPSBranchingPoint( NeuronInde return _neuronToBBPSBranchingPoints[index]; } +Vector NetworkLevelReasoner::getSymbolicLbPerBranch( NeuronIndex index ) +{ + // Initialize map if empty. + if ( _neuronToSymbolicLbPerBranch.empty() ) + { + initializeBBPSBranchingMaps(); + } + + if ( !_neuronToSymbolicLbPerBranch.exists( index ) ) + { + throw NLRError( NLRError::NEURON_NOT_FOUND, + "Neuron not found in BBPS branch symbolic bounds map." ); + } + + return _neuronToSymbolicLbPerBranch[index]; +} + +Vector NetworkLevelReasoner::getSymbolicUbPerBranch( NeuronIndex index ) +{ + // Initialize map if empty. + if ( _neuronToSymbolicUbPerBranch.empty() ) + { + initializeBBPSBranchingMaps(); + } + + if ( !_neuronToSymbolicUbPerBranch.exists( index ) ) + { + throw NLRError( NLRError::NEURON_NOT_FOUND, + "Neuron not found in BBPS branch symbolic bounds map." ); + } + + return _neuronToSymbolicUbPerBranch[index]; +} + +Vector NetworkLevelReasoner::getSymbolicLowerBiasPerBranch( NeuronIndex index ) +{ + // Initialize map if empty. + if ( _neuronToSymbolicLowerBiasPerBranch.empty() ) + { + initializeBBPSBranchingMaps(); + } + + if ( !_neuronToSymbolicLowerBiasPerBranch.exists( index ) ) + { + throw NLRError( NLRError::NEURON_NOT_FOUND, + "Neuron not found in BBPS branch symbolic bounds map." ); + } + + return _neuronToSymbolicLowerBiasPerBranch[index]; +} + +Vector NetworkLevelReasoner::getSymbolicUpperBiasPerBranch( NeuronIndex index ) +{ + // Initialize map if empty. + if ( _neuronToSymbolicUpperBiasPerBranch.empty() ) + { + initializeBBPSBranchingMaps(); + } + + if ( !_neuronToSymbolicUpperBiasPerBranch.exists( index ) ) + { + throw NLRError( NLRError::NEURON_NOT_FOUND, + "Neuron not found in BBPS branch symbolic bounds map." 
); + } + + return _neuronToSymbolicUpperBiasPerBranch[index]; +} + unsigned NetworkLevelReasoner::mergeConsecutiveWSLayers( const Map &lowerBounds, const Map &upperBounds, @@ -1153,226 +1354,679 @@ double NetworkLevelReasoner::calculateDifferenceFromSymbolic( const Layer *layer return std::max( layer->getUb( i ) - upperSum, lowerSum - layer->getLb( i ) ); } -const Vector NetworkLevelReasoner::OptimizeParameterisedPolygonalTightening() +const Vector NetworkLevelReasoner::generatePolygonalTightenings() { - computeSuccessorLayers(); - const Vector &selectedTightenings = generatePolygonalTightenings(); - const unsigned size = selectedTightenings.size(); - Vector optimizedTightenings = Vector( {} ); - for ( unsigned i = 0; i < size; ++i ) + if ( Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP ) { - PolygonalTightening tightening = selectedTightenings[i]; - double lowerBound = - OptimizeSingleParameterisedPolygonalTightening( tightening, optimizedTightenings ); - tightening._value = lowerBound; - optimizedTightenings.append( tightening ); + return generatePolygonalTighteningsForInvprop(); } - const Vector tightenings( optimizedTightenings ); - return tightenings; + else if ( Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM || + Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT || + Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS ) + { + return generatePolygonalTighteningsForPMNR(); + } + + const Vector tighteningsVector = Vector( {} ); + return tighteningsVector; } -double NetworkLevelReasoner::OptimizeSingleParameterisedPolygonalTightening( - PolygonalTightening &tightening, - Vector &prevTightenings ) +const Vector NetworkLevelReasoner::generatePolygonalTighteningsForInvprop() { - // Search over coeffs in [0, 1]^numberOfParameters, gamma in - // [0, inf)^sizeOfPrevTightenings with PGD. - // unsigned maxIterations = GlobalConfiguration::INVPROP_MAX_ITERATIONS; // ... - unsigned maxIterations = 1000; - // double coeffsStepSize = GlobalConfiguration::INVPROP_STEP_SIZE; - // double gammaStepSize = GlobalConfiguration::INVPROP_STEP_SIZE; - double coeffsStepSize = 0.025; - double gammaStepSize = 0.0025; - double epsilon = GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS; - double weightDecay = GlobalConfiguration::INVPROP_WEIGHT_DECAY; - double lr = GlobalConfiguration::INVPROP_LEARNING_RATE; - unsigned coeffsDimension = getNumberOfParameters(); - unsigned gammaDimension = prevTightenings.size(); - bool maximize = ( tightening._type == PolygonalTightening::LB ); - double sign = ( maximize ? 1 : -1 ); - double bestBound = sign * tightening._value; - - Vector coeffsLowerBounds( coeffsDimension, 0 ); - Vector coeffsUpperBounds( coeffsDimension, 1 ); - Vector gammaLowerBounds( gammaDimension, 0 ); - - Vector coeffs( coeffsDimension, GlobalConfiguration::INVPROP_INITIAL_ALPHA ); - Vector gamma( gammaDimension, GlobalConfiguration::INVPROP_INITIAL_GAMMA ); - - Vector> coeffsCandidates( coeffsDimension ); - Vector> gammaCandidates( gammaDimension ); - Vector coeffsGradient( coeffsDimension ); - Vector gammaGradient( gammaDimension ); - - for ( unsigned i = 0; i < maxIterations; ++i ) + // Polygonal tightenings for INVPROP are box constraints for every non-input neuron. 
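The INVPROP box constraints described in the comment above reduce each neuron's interval to two linear tightenings: 1*x >= lb and 1*x <= ub. A simplified stand-in for Marabou's PolygonalTightening, purely for illustration; the struct and field names here are hypothetical.

#include <cassert>
#include <map>

struct MockTightening
{
    std::map<unsigned, double> coeffs; // neuron id -> coefficient
    double value;                      // the bound on the linear expression
    bool isLowerBound;                 // LB vs. UB
};

int main()
{
    double lb = -1.0, ub = 2.0;
    MockTightening lower{ { { 7, 1.0 } }, lb, true };  // x7 >= -1
    MockTightening upper{ { { 7, 1.0 } }, ub, false }; // x7 <= 2
    assert( lower.coeffs.at( 7 ) == 1.0 && upper.value == 2.0 );
    return 0;
}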
+ Vector tightenings = Vector( {} ); + for ( const auto &pair : _layerIndexToLayer ) { - double currentCost = getParameterisdPolygonalTighteningLowerBound( - coeffs, gamma, tightening, prevTightenings ); - for ( unsigned j = 0; j < coeffsDimension; ++j ) + Layer *layer = pair.second; + for ( unsigned i = 0; i < layer->getSize(); ++i ) { - coeffsCandidates[j] = Vector( coeffs ); - coeffsCandidates[j][j] += coeffsStepSize; - - if ( coeffs[j] <= coeffsLowerBounds[j] || coeffs[j] >= coeffsUpperBounds[j] || - coeffsCandidates[j][j] > coeffsUpperBounds[j] || - coeffsCandidates[j][j] < coeffsLowerBounds[j] ) + NeuronIndex index( pair.first, i ); + if ( layer->getLayerType() == Layer::WEIGHTED_SUM || layer->neuronNonfixed( i ) ) { - coeffsGradient[j] = 0; - continue; + Map lowerCoeffs; + Map upperCoeffs; + lowerCoeffs[index] = 1; + upperCoeffs[index] = 1; + PolygonalTightening lowerTightening( + lowerCoeffs, layer->getLb( i ), PolygonalTightening::LB ); + PolygonalTightening upperTightening( + upperCoeffs, layer->getUb( i ), PolygonalTightening::UB ); + tightenings.append( lowerTightening ); + tightenings.append( upperTightening ); } - - double cost = getParameterisdPolygonalTighteningLowerBound( - coeffsCandidates[j], gamma, tightening, prevTightenings ); - coeffsGradient[j] = ( cost - currentCost ) / coeffsStepSize + weightDecay * coeffs[j]; - bestBound = ( maximize ? std::max( bestBound, cost ) : std::min( bestBound, cost ) ); } + } + const Vector tighteningsVector = + Vector( tightenings ); + return tighteningsVector; +} - for ( unsigned j = 0; j < gammaDimension; ++j ) - { - gammaCandidates[j] = Vector( gamma ); - gammaCandidates[j][j] += gammaStepSize; +const Vector NetworkLevelReasoner::generatePolygonalTighteningsForPMNR() +{ + Vector tightenings = Vector( {} ); + Vector lowerDeepPolyTightenings = Vector( {} ); + Vector upperDeepPolyTightenings = Vector( {} ); + const Vector neurons = selectPMNRNeurons(); + unsigned neuronCount = neurons.size(); - if ( gamma[j] <= gammaLowerBounds[j] || gammaCandidates[j][j] < gammaLowerBounds[j] ) - { - gammaGradient[j] = 0; - continue; - } + // Initial tightenings are non-fixed neurons and their DeepPoly symbolic bounds. + for ( const auto &pair : neurons ) + { + unsigned layerIndex = pair._layer; + unsigned neuron = pair._neuron; + Layer *layer = _layerIndexToLayer[layerIndex]; + unsigned size = layer->getSize(); - double cost = getParameterisdPolygonalTighteningLowerBound( - coeffs, gammaCandidates[j], tightening, prevTightenings ); - gammaGradient[j] = ( currentCost - cost ) / gammaStepSize + weightDecay * gamma[j]; - bestBound = ( maximize ? 
                                 std::max( bestBound, cost ) : std::min( bestBound, cost ) );
-        }

+        Map<NeuronIndex, double> lowerCoeffs;
+        Map<NeuronIndex, double> upperCoeffs;
+        lowerCoeffs[pair] = 1;
+        upperCoeffs[pair] = -1;
+
+        unsigned inputIndex = 0;
+        List<NeuronIndex> sources = layer->getActivationSources( neuron );
+        for ( const auto &sourceIndex : sources )
+        {
+            lowerCoeffs[sourceIndex] =
+                -getPredecessorSymbolicLb( layerIndex )[size * inputIndex + neuron];
+            upperCoeffs[sourceIndex] =
+                getPredecessorSymbolicUb( layerIndex )[size * inputIndex + neuron];
+            ++inputIndex;
+        }
+        PolygonalTightening lowerTightening( lowerCoeffs,
+                                             getPredecessorSymbolicLowerBias( layerIndex )[neuron],
+                                             PolygonalTightening::LB );
+        PolygonalTightening upperTightening( upperCoeffs,
+                                             -getPredecessorSymbolicUpperBias( layerIndex )[neuron],
+                                             PolygonalTightening::LB );
+        lowerDeepPolyTightenings.append( lowerTightening );
+        upperDeepPolyTightenings.append( upperTightening );
+    }

-        bool gradientIsZero = true;
-        for ( unsigned j = 0; j < coeffsDimension; ++j )
+    /*
+      If DeepPoly bounds are x_f - \sum a_u_i x_i <= b_u, x_f - \sum a_l_i x_i >= b_l, PMNR
+      tightenings are linear combinations of x_f - \sum a_u_i x_i, coeffs in {-1, 0, 1}, and
+      linear combinations of x_f - \sum a_l_i x_i, coeffs in {-1, 0, 1}.
+    */
+    const Vector<double> weights = Vector<double>( { -1, 0, 1 } );
+    unsigned weightCount = weights.size();
+    unsigned range = std::pow( weightCount, neuronCount );
+    for ( unsigned i = 0; i < range; ++i )
+    {
+        // Keep track of whether all combination weights for the current tightening are
+        // non-negative, and count non-zero weights.
+        unsigned nonZeroWeights = 0;
+        bool allNonnegative = true;
+        Map<NeuronIndex, double> lowerCoeffs;
+        Map<NeuronIndex, double> upperCoeffs;
+        for ( unsigned j = 0; j < neuronCount; ++j )
         {
-            if ( FloatUtils::abs( coeffsGradient[j] ) > epsilon )
+            unsigned mask = std::pow( weightCount, j );
+            unsigned flag = ( i / mask ) % weightCount;
+            double weight = weights[flag];
+            if ( weight < 0 )
             {
-                gradientIsZero = false;
+                allNonnegative = false;
             }
-        }
-        for ( unsigned j = 0; j < gammaDimension; ++j )
-        {
-            if ( FloatUtils::abs( gammaGradient[j] ) > epsilon )
+            if ( weight != 0 )
             {
-                gradientIsZero = false;
+                ++nonZeroWeights;
+            }
+
+            // Compute linear combinations of DeepPoly tightenings.
+            for ( const auto &pair : lowerDeepPolyTightenings[j]._neuronToCoefficient )
+            {
+                if ( !lowerCoeffs.exists( pair.first ) )
+                {
+                    lowerCoeffs[pair.first] = weight * pair.second;
+                }
+                else
+                {
+                    lowerCoeffs[pair.first] = lowerCoeffs[pair.first] + weight * pair.second;
+                }
+            }
+            for ( const auto &pair : upperDeepPolyTightenings[j]._neuronToCoefficient )
+            {
+                if ( !upperCoeffs.exists( pair.first ) )
+                {
+                    upperCoeffs[pair.first] = weight * pair.second;
+                }
+                else
+                {
+                    upperCoeffs[pair.first] = upperCoeffs[pair.first] + weight * pair.second;
+                }
             }
         }
-        if ( gradientIsZero )
+
+        // No need to tighten the original DeepPoly bounds.
+        if ( nonZeroWeights <= 1 )
         {
-            break;
+            continue;
         }
-        for ( unsigned j = 0; j < coeffsDimension; ++j )
+        // Calculate an initial concrete lower bound for all tightenings. If all coefficients
+        // are non-negative, compute lower bounds by adding the DeepPoly tightenings' lower bounds.
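The concretization rule just described can be checked in isolation: a guaranteed lower bound of sum_j c_j * x_j over a box takes c_j * lb_j for non-negative coefficients and c_j * ub_j for negative ones. A minimal sketch with plain std::vector standing in for Marabou's containers:

#include <cassert>
#include <vector>

int main()
{
    std::vector<double> c = { 1.0, -2.0 };
    std::vector<double> lb = { -1.0, 0.0 }, ub = { 2.0, 3.0 };
    double bound = 0.0;
    for ( size_t j = 0; j < c.size(); ++j )
        bound += c[j] >= 0 ? c[j] * lb[j] : c[j] * ub[j];
    // Worst case is attained at x = (-1, 3): 1*(-1) + (-2)*3 = -7.
    assert( bound == 1.0 * -1.0 + -2.0 * 3.0 );
    return 0;
}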
+        double lowerTighteningLb = 0;
+        double upperTighteningLb = 0;
+        if ( allNonnegative )
         {
-            coeffs[j] += sign * lr * coeffsGradient[j];
-            coeffs[j] = std::min( coeffs[j], coeffsUpperBounds[j] );
-            coeffs[j] = std::max( coeffs[j], coeffsLowerBounds[j] );
+            for ( unsigned j = 0; j < neuronCount; ++j )
+            {
+                unsigned mask = std::pow( weightCount, j );
+                unsigned flag = ( i / mask ) % weightCount;
+                double weight = weights[flag];
+                lowerTighteningLb += weight * lowerDeepPolyTightenings[j]._value;
+                upperTighteningLb += weight * upperDeepPolyTightenings[j]._value;
+            }
         }
-        for ( unsigned j = 0; j < gammaDimension; ++j )
+
+        // If some weights are negative, compute lower bounds by concretizing.
+        else
         {
-            gamma[j] += sign * lr * gammaGradient[j];
-            gamma[j] = std::max( gamma[j], gammaLowerBounds[j] );
+            for ( const auto &pair : lowerCoeffs )
+            {
+                double neuronLb =
+                    _layerIndexToLayer[pair.first._layer]->getLb( pair.first._neuron );
+                double neuronUb =
+                    _layerIndexToLayer[pair.first._layer]->getUb( pair.first._neuron );
+                lowerTighteningLb +=
+                    pair.second >= 0 ? pair.second * neuronLb : pair.second * neuronUb;
+            }
+            for ( const auto &pair : upperCoeffs )
+            {
+                double neuronLb =
+                    _layerIndexToLayer[pair.first._layer]->getLb( pair.first._neuron );
+                double neuronUb =
+                    _layerIndexToLayer[pair.first._layer]->getUb( pair.first._neuron );
+                upperTighteningLb +=
+                    pair.second >= 0 ? pair.second * neuronLb : pair.second * neuronUb;
+            }
         }
+        PolygonalTightening lowerTightening(
+            lowerCoeffs, lowerTighteningLb, PolygonalTightening::LB );
+        PolygonalTightening upperTightening(
+            upperCoeffs, upperTighteningLb, PolygonalTightening::LB );
+        tightenings.append( lowerTightening );
+        tightenings.append( upperTightening );
     }
-    return bestBound;
+    const Vector<PolygonalTightening> tighteningsVector =
+        Vector<PolygonalTightening>( tightenings );
+    return tighteningsVector;
 }
 
-double NetworkLevelReasoner::getParameterisdPolygonalTighteningLowerBound(
-    const Vector<double> &coeffs,
-    const Vector<double> &gamma,
-    PolygonalTightening &tightening,
-    Vector<PolygonalTightening> &prevTightenings )
+const Vector<NeuronIndex> NetworkLevelReasoner::selectPMNRNeurons()
 {
-    // First, run parameterised nitializeSymbolicBoundsMaps.
-    initializeSymbolicBoundsMaps( coeffs );
-
-    // Recursively compute mu, muHat for every layer.
-    const unsigned numLayers = _layerIndexToLayer.size();
-    const unsigned maxLayer = _layerIndexToLayer.size() - 1;
-    const unsigned prevTigheningsCount = prevTightenings.size();
-    const unsigned inputLayerSize = _layerIndexToLayer[0]->getSize();
-
-    Vector<Vector<double>> mu( numLayers );
-    Vector<Vector<double>> muHat( numLayers );
-
-    for ( int index = maxLayer; index >= 0; --index )
+    switch ( Options::get()->getMILPSolverBoundTighteningType() )
+    {
+    case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM:
+    {
+        return selectPMNRNeuronsRandomly();
+        break;
+    }
+    case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT:
+    case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS:
+    {
+        return selectPMNRNeuronsHeuristically();
+        break;
+    }
+    default:
     {
-        Layer *layer = _layerIndexToLayer[index];
-        const unsigned layerSize = layer->getSize();
-        const unsigned layerIndex = layer->getLayerIndex();
+        Vector<NeuronIndex> emptyVector = Vector<NeuronIndex>( {} );
+        return emptyVector;
+    }
+    }
+}
 
-        mu[layerIndex] = Vector<double>( layerSize, 0 );
-        muHat[layerIndex] = Vector<double>( layerSize, 0 );
+const Vector<NeuronIndex> NetworkLevelReasoner::selectPMNRNeuronsRandomly()
+{
+    // Randomly select layer with nonfixed neurons.
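The seeded selection implemented next has one property worth making explicit: a fixed seed makes the "random" choice reproducible across runs. A standalone sketch of the same pattern; the seed value 42 is hypothetical, standing in for PMNR_RANDOM_SEED.

#include <algorithm>
#include <cassert>
#include <random>
#include <vector>

int main()
{
    std::vector<unsigned> candidates = { 0, 1, 2, 3, 4 };
    std::mt19937_64 rng( 42 ); // fixed seed, like GlobalConfiguration::PMNR_RANDOM_SEED
    std::shuffle( candidates.begin(), candidates.end(), rng );
    const size_t k = std::min<size_t>( 2, candidates.size() );
    std::vector<unsigned> selected( candidates.begin(), candidates.begin() + k );
    assert( selected.size() == 2 );

    // Re-seeding reproduces the same permutation, hence the same selection.
    std::mt19937_64 rng2( 42 );
    std::vector<unsigned> again = { 0, 1, 2, 3, 4 };
    std::shuffle( again.begin(), again.end(), rng2 );
    assert( std::equal( selected.begin(), selected.end(), again.begin() ) );
    return 0;
}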
+    const Vector<unsigned> &candidateLayers = getLayersWithNonfixedNeurons();
+    if ( candidateLayers.empty() )
+    {
+        const Vector<NeuronIndex> emptyVector( {} );
+        return emptyVector;
+    }
+    std::mt19937_64 rng( GlobalConfiguration::PMNR_RANDOM_SEED );
+    std::uniform_int_distribution<unsigned> dis( 0, candidateLayers.size() - 1 );
+    unsigned entry = dis( rng );
+    unsigned index = candidateLayers[entry];
+
+    // Randomly select nonfixed neurons from this layer.
+    Layer *layer = _layerIndexToLayer[index];
+    const Vector<NeuronIndex> &candidateNeurons = layer->getNonfixedNeurons();
+    std::vector<NeuronIndex> candidateNeuronsVector = candidateNeurons.getContainer();
+    std::shuffle( candidateNeuronsVector.begin(), candidateNeuronsVector.end(), rng );
+
+    unsigned neuronCount =
+        std::min( GlobalConfiguration::PMNR_SELECTED_NEURONS, candidateNeurons.size() );
+    Vector<NeuronIndex> selectedNeurons = Vector<NeuronIndex>( neuronCount );
+    for ( unsigned i = 0; i < neuronCount; ++i )
+    {
+        selectedNeurons[i] = candidateNeuronsVector[i];
+    }
+    const Vector<NeuronIndex> neurons( selectedNeurons );
+    return neurons;
+}
+
+const Vector<NeuronIndex> NetworkLevelReasoner::selectPMNRNeuronsHeuristically()
+{
+    // Select layer with maximal PMNR neuron score sum.
+    const Vector<unsigned> &candidateLayers = getLayersWithNonfixedNeurons();
+    if ( candidateLayers.empty() )
+    {
+        const Vector<NeuronIndex> emptyVector( {} );
+        return emptyVector;
+    }
+
+    double maxScore = 0;
+    unsigned maxScoreIndex = 0;
+    for ( const auto &layerIndex : candidateLayers )
+    {
+        double layerScore = 0;
+        Layer *layer = _layerIndexToLayer[layerIndex];
+        for ( const auto &index : layer->getNonfixedNeurons() )
+        {
+            double neuronScore = getPMNRScore( index );
+            layerScore += neuronScore;
+        }
+
+        if ( layerScore > maxScore )
+        {
+            maxScore = layerScore;
+            maxScoreIndex = layerIndex;
+        }
+    }
+
+    // Extract highest score neurons from this layer.
+    Layer *layer = _layerIndexToLayer[maxScoreIndex];
+    std::priority_queue<std::pair<double, unsigned>,
+                        std::vector<std::pair<double, unsigned>>,
+                        std::less<std::pair<double, unsigned>>>
+        maxQueue;
+    const Vector<NeuronIndex> nonfixedNeurons = layer->getNonfixedNeurons();
+    for ( const auto &index : nonfixedNeurons )
+    {
+        maxQueue.push( std::pair<double, unsigned>( getPMNRScore( index ), index._neuron ) );
+    }
+
+    unsigned neuronCount =
+        std::min( GlobalConfiguration::PMNR_SELECTED_NEURONS, nonfixedNeurons.size() );
+    Vector<NeuronIndex> selectedNeurons = Vector<NeuronIndex>( neuronCount );
+    for ( unsigned i = 0; i < neuronCount; ++i )
+    {
+        selectedNeurons[i] = NeuronIndex( maxScoreIndex, maxQueue.top().second );
+        maxQueue.pop();
+    }
+    const Vector<NeuronIndex> neurons( selectedNeurons );
+    return neurons;
+}
+
+const Vector<PolygonalTightening> NetworkLevelReasoner::OptimizeParameterisedPolygonalTightening()
+{
+    // Calculate successor layers, PMNR scores, symbolic bound maps before optimizing.
+    computeSuccessorLayers();
+    parameterisedDeepPoly( true );
+    initializePMNRScoreMap();
+
+    // Repeatedly optimize polygonal tightenings given previously optimized ones.
+    const Vector<PolygonalTightening> &selectedTightenings = generatePolygonalTightenings();
+    Vector<PolygonalTightening> optimizedTightenings = Vector<PolygonalTightening>( {} );
+    for ( unsigned i = 0; i < selectedTightenings.size(); ++i )
+    {
+        PolygonalTightening tightening = selectedTightenings[i];
+        bool maximize = ( tightening._type == PolygonalTightening::LB );
+        double feasibilityBound = maximize ? FloatUtils::infinity() : FloatUtils::negativeInfinity();
+        double bound = OptimizeSingleParameterisedPolygonalTightening(
+            tightening, optimizedTightenings, maximize, feasibilityBound );
+
+        // For PMNR, attempt to obtain a stronger bound by branching selected neurons if supported.
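The branching code that follows enumerates every branch combination with a mixed-radix counter: with per-neuron branch counts (b_0, ..., b_{k-1}), counter i decodes to digit j via (i / (b_0 * ... * b_{j-1})) % b_j. A standalone decoding sketch under assumed branch counts:

#include <cassert>
#include <functional>
#include <iterator>
#include <numeric>
#include <vector>

int main()
{
    std::vector<unsigned> branchCounts = { 2, 3 }; // hypothetical per-neuron counts
    unsigned range = std::accumulate(
        branchCounts.begin(), branchCounts.end(), 1u, std::multiplies<unsigned>() );
    assert( range == 6 ); // every combination is visited exactly once

    std::vector<std::vector<unsigned>> combos;
    for ( unsigned i = 0; i < range; ++i )
    {
        std::vector<unsigned> combo( branchCounts.size() );
        for ( unsigned j = 0; j < branchCounts.size(); ++j )
        {
            unsigned mask = std::accumulate( branchCounts.begin(),
                                             std::next( branchCounts.begin(), j ),
                                             1u,
                                             std::multiplies<unsigned>() );
            combo[j] = ( i / mask ) % branchCounts[j];
        }
        combos.push_back( combo );
    }
    // i = 5 decodes to digits (5 % 2, (5 / 2) % 3) = (1, 2).
    assert( combos.size() == 6 && combos[5][0] == 1 && combos[5][1] == 2 );
    return 0;
}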
+        if ( Options::get()->getMILPSolverBoundTighteningType() ==
+                 MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM ||
+             Options::get()->getMILPSolverBoundTighteningType() ==
+                 MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT ||
+             Options::get()->getMILPSolverBoundTighteningType() ==
+                 MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS )
+        {
+            tightening._value = OptimizeSingleParameterisedPolygonalTighteningWithBranching(
+                tightening, optimizedTightenings, maximize, bound );
+        }
+        else
+        {
+            tightening._value = bound;
+        }
+        optimizedTightenings.append( tightening );
+
+        // Store optimized tightenings in NLR.
+        receivePolygonalTightening( tightening );
+    }
+
+    const Vector<PolygonalTightening> tightenings( optimizedTightenings );
+    return tightenings;
+}
+
+double NetworkLevelReasoner::OptimizeSingleParameterisedPolygonalTighteningWithBranching(
+    PolygonalTightening &tightening,
+    Vector<PolygonalTightening> &prevTightenings,
+    bool maximize,
+    double originalBound )
+{
+    // Determine which of the selected neurons support branching.
+    const Vector<NeuronIndex> selectedNeurons = selectPMNRNeurons();
+    Vector<NeuronIndex> neurons = Vector<NeuronIndex>( {} );
+    for ( const auto &index : selectedNeurons )
+    {
+        const Layer *layer = _layerIndexToLayer[index._layer];
+        if ( layer->neuronNonfixed( index._neuron ) &&
+             supportsInvpropBranching( layer->getLayerType() ) )
+        {
+            neurons.append( index );
+        }
+    }
+    if ( neurons.empty() )
+        return originalBound;
+
+    // Re-optimize the current tightening for every branch combination. If we seek to maximize
+    // the tightening's bound, select the minimal bound over all combinations, and vice versa.
+    bool maximizeBranchBound = !maximize;
+    double newBound = maximizeBranchBound ? FloatUtils::negativeInfinity() : FloatUtils::infinity();
+    unsigned neuronCount = neurons.size();
+    Vector<unsigned> branchCounts( neuronCount, 0 );
+    for ( unsigned i = 0; i < neuronCount; ++i )
+    {
+        branchCounts[i] = getSymbolicLbPerBranch( neurons[i] ).size();
+    }
+    unsigned range =
+        std::accumulate( branchCounts.begin(), branchCounts.end(), 1u, std::multiplies<unsigned>() );
+    for ( unsigned i = 0; i < range; ++i )
+    {
+        Map<NeuronIndex, unsigned> neuronToBranchIndex;
+        for ( unsigned j = 0; j < neuronCount; ++j )
+        {
+            unsigned mask = std::accumulate( branchCounts.begin(),
+                                             std::next( branchCounts.begin(), j ),
+                                             1u,
+                                             std::multiplies<unsigned>() );
+            unsigned branchIndex = ( i / mask ) % branchCounts[j];
+            NeuronIndex index = neurons[j];
+            neuronToBranchIndex.insert( index, branchIndex );
+        }
+
+        // To determine some of the infeasible branch combinations, calculate a feasibility bound
+        // (a known upper/lower bound for the max/min problem) with concretization.
+        double feasibilityBound = 0;
+        for ( const auto &pair : tightening._neuronToCoefficient )
+        {
+            double ub = _layerIndexToLayer[pair.first._layer]->getUb( pair.first._neuron );
+            double lb = _layerIndexToLayer[pair.first._layer]->getLb( pair.first._neuron );
+            if ( maximize )
+            {
+                feasibilityBound += pair.second > 0 ? pair.second * ub : pair.second * lb;
+            }
+            else
+            {
+                feasibilityBound += pair.second > 0 ? pair.second * lb : pair.second * ub;
+            }
+        }
+
+        double branchBound = OptimizeSingleParameterisedPolygonalTightening(
+            tightening, prevTightenings, maximize, feasibilityBound, neuronToBranchIndex );
+
+        // If the bound is stronger than the known feasibility bound, the branch combination
+        // is infeasible; store it in the NLR.
+        if ( !FloatUtils::isFinite( branchBound ) ||
+             ( maximize ? branchBound > feasibilityBound : branchBound < feasibilityBound ) )
+        {
+            receiveInfeasibleBranches( neuronToBranchIndex );
+        }
+        else
+        {
+            newBound = maximizeBranchBound ? std::max( branchBound, newBound )
+                                           : std::min( branchBound, newBound );
+        }
+    }
+
+    newBound = maximize ? std::max( originalBound, newBound ) : std::min( originalBound, newBound );
+    return newBound;
+}
+
+double NetworkLevelReasoner::OptimizeSingleParameterisedPolygonalTightening(
+    PolygonalTightening &tightening,
+    Vector<PolygonalTightening> &prevTightenings,
+    bool maximize,
+    double feasibilityBound,
+    const Map<NeuronIndex, unsigned> &neuronToBranchIndex )
+{
+    // Search over coeffs in [0, 1]^numberOfParameters, gamma in
+    // [0, inf)^sizeOfPrevTightenings with PGD.
+    unsigned maxIterations = GlobalConfiguration::INVPROP_MAX_ITERATIONS;
+    double coeffsStepSize = GlobalConfiguration::INVPROP_STEP_SIZE;
+    double gammaStepSize = GlobalConfiguration::INVPROP_STEP_SIZE;
+    double epsilon = GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS;
+    double weightDecay = GlobalConfiguration::INVPROP_WEIGHT_DECAY;
+    double lr = GlobalConfiguration::INVPROP_LEARNING_RATE;
+    unsigned coeffsDimension = getNumberOfParameters();
+    unsigned gammaDimension = prevTightenings.size();
+    double sign = ( maximize ? 1 : -1 );
+    double bestBound = tightening._value;
+
+    Vector<double> coeffsLowerBounds( coeffsDimension, 0 );
+    Vector<double> coeffsUpperBounds( coeffsDimension, 1 );
+    Vector<double> gammaLowerBounds( gammaDimension, 0 );
+
+    Vector<double> coeffs( coeffsDimension, GlobalConfiguration::INVPROP_INITIAL_ALPHA );
+    Vector<double> gamma( gammaDimension, GlobalConfiguration::INVPROP_INITIAL_GAMMA );
+    Vector<double> previousCoeffs( coeffs );
+    Vector<double> previousGamma( gamma );
+
+    Vector<Vector<double>> coeffsCandidates( coeffsDimension );
+    Vector<Vector<double>> gammaCandidates( gammaDimension );
+    Vector<double> coeffsGradient( coeffsDimension );
+    Vector<double> gammaGradient( gammaDimension );
+
+    for ( unsigned i = 0; i < maxIterations; ++i )
+    {
+        for ( unsigned j = 0; j < coeffsDimension; ++j )
+        {
+            coeffs[j] += weightDecay * ( coeffs[j] - previousCoeffs[j] );
+            coeffs[j] = std::min( coeffs[j], coeffsUpperBounds[j] );
+            coeffs[j] = std::max( coeffs[j], coeffsLowerBounds[j] );
+        }
+        for ( unsigned j = 0; j < gammaDimension; ++j )
+        {
+            gamma[j] += weightDecay * ( gamma[j] - previousGamma[j] );
+            gamma[j] = std::max( gamma[j], gammaLowerBounds[j] );
+        }
+
+        double currentCost = getParameterisdPolygonalTighteningBound(
+            coeffs, gamma, tightening, prevTightenings, neuronToBranchIndex );
+
+        // If the calculated bound is stronger than the known feasibility bound, stop optimization.
+        if ( !FloatUtils::isFinite( currentCost ) ||
+             ( maximize ? currentCost > feasibilityBound : currentCost < feasibilityBound ) )
+        {
+            return currentCost;
+        }
+
+        for ( unsigned j = 0; j < coeffsDimension; ++j )
+        {
+            coeffsCandidates[j] = Vector<double>( coeffs );
+            coeffsCandidates[j][j] += coeffsStepSize;
+            if ( coeffsCandidates[j][j] > coeffsUpperBounds[j] ||
+                 coeffsCandidates[j][j] < coeffsLowerBounds[j] )
+            {
+                coeffsGradient[j] = 0;
+                continue;
+            }
+
+            double cost = getParameterisdPolygonalTighteningBound(
+                coeffsCandidates[j], gamma, tightening, prevTightenings, neuronToBranchIndex );
+            if ( !FloatUtils::isFinite( cost ) ||
+                 ( maximize ? cost > feasibilityBound : cost < feasibilityBound ) )
+            {
+                return cost;
+            }
+
+            coeffsGradient[j] = ( cost - currentCost ) / coeffsStepSize;
+            bestBound = ( maximize ? std::max( bestBound, cost ) : std::min( bestBound, cost ) );
+        }
+
+        for ( unsigned j = 0; j < gammaDimension; ++j )
+        {
+            gammaCandidates[j] = Vector<double>( gamma );
+            gammaCandidates[j][j] += gammaStepSize;
+            if ( gammaCandidates[j][j] < gammaLowerBounds[j] )
+            {
+                gammaGradient[j] = 0;
+                continue;
+            }
+
+            double cost = getParameterisdPolygonalTighteningBound(
+                coeffs, gammaCandidates[j], tightening, prevTightenings, neuronToBranchIndex );
+            if ( !FloatUtils::isFinite( cost ) ||
+                 ( maximize ? cost > feasibilityBound : cost < feasibilityBound ) )
+            {
+                return cost;
+            }
+
+            gammaGradient[j] = ( cost - currentCost ) / gammaStepSize;
+            bestBound = ( maximize ? std::max( bestBound, cost ) : std::min( bestBound, cost ) );
+        }
+
+        bool gradientIsZero = true;
+        for ( unsigned j = 0; j < coeffsDimension; ++j )
+        {
+            if ( FloatUtils::abs( coeffsGradient[j] ) > epsilon )
+            {
+                gradientIsZero = false;
+            }
+        }
+        for ( unsigned j = 0; j < gammaDimension; ++j )
+        {
+            if ( FloatUtils::abs( gammaGradient[j] ) > epsilon )
+            {
+                gradientIsZero = false;
+            }
+        }
+        if ( gradientIsZero )
+        {
+            break;
+        }
+        for ( unsigned j = 0; j < coeffsDimension; ++j )
+        {
+            previousCoeffs[j] = coeffs[j];
+            coeffs[j] += sign * lr * coeffsGradient[j];
+            coeffs[j] = std::min( coeffs[j], coeffsUpperBounds[j] );
+            coeffs[j] = std::max( coeffs[j], coeffsLowerBounds[j] );
+        }
+        for ( unsigned j = 0; j < gammaDimension; ++j )
+        {
+            previousGamma[j] = gamma[j];
+            gamma[j] += sign * lr * gammaGradient[j];
+            gamma[j] = std::max( gamma[j], gammaLowerBounds[j] );
+        }
+    }
+
+    return bestBound;
+}
+
+double NetworkLevelReasoner::getParameterisdPolygonalTighteningBound(
+    const Vector<double> &coeffs,
+    const Vector<double> &gamma,
+    PolygonalTightening &tightening,
+    Vector<PolygonalTightening> &prevTightenings,
+    const Map<NeuronIndex, unsigned> &neuronToBranchIndex )
+{
+    // First, run parameterised DeepPoly.
+    parameterisedDeepPoly( true, coeffs );
+
+    // Recursively compute vectors mu, muHat for every layer with the backpropagation procedure.
+    unsigned numLayers = _layerIndexToLayer.size();
+    unsigned maxLayer = _layerIndexToLayer.size() - 1;
+    unsigned prevTigheningsCount = prevTightenings.size();
+    unsigned inputLayerSize = _layerIndexToLayer[0]->getSize();
+    double sign = ( tightening._type == PolygonalTightening::LB ? 1 : -1 );
+    Vector<Vector<double>> mu( numLayers );
+    Vector<Vector<double>> muHat( numLayers );
+
+    for ( unsigned layerIndex = numLayers; layerIndex-- > 0; )
+    {
+        Layer *layer = _layerIndexToLayer[layerIndex];
+        unsigned size = layer->getSize();
+        mu[layerIndex] = Vector<double>( size, 0 );
+        muHat[layerIndex] = Vector<double>( size, 0 );
         if ( layerIndex < maxLayer )
         {
-            for ( unsigned i = 0; i < layerSize; ++i )
+            for ( unsigned i = 0; i < size; ++i )
             {
-                NeuronIndex neuron( layerIndex, i );
-                for ( const unsigned successorLayerIndex : layer->getSuccessorLayers() )
+                for ( unsigned successorIndex : layer->getSuccessorLayers() )
                 {
-                    const Layer *successorLayer = getLayer( successorLayerIndex );
-                    const unsigned successorLayerSize = successorLayer->getSize();
-                    const Layer::Type successorLayerType = successorLayer->getLayerType();
+                    const Layer *successorLayer = _layerIndexToLayer[successorIndex];
+                    unsigned successorSize = successorLayer->getSize();
 
-                    if ( successorLayerType == Layer::WEIGHTED_SUM )
+                    if ( successorLayer->getLayerType() == Layer::WEIGHTED_SUM )
                     {
-                        const double *successorWeights =
-                            successorLayer->getWeightMatrix( layerIndex );
-
-                        for ( unsigned j = 0; j < successorLayerSize; ++j )
+                        const double *weights = successorLayer->getWeightMatrix( layerIndex );
+                        for ( unsigned j = 0; j < successorSize; ++j )
                         {
                             if ( !successorLayer->neuronEliminated( j ) )
                             {
                                 muHat[layerIndex][i] +=
-                                    mu[successorLayerIndex][j] *
-                                    successorWeights[i * successorLayerSize + j];
+                                    mu[successorIndex][j] * weights[i * successorSize + j];
                             }
                         }
                     }
                     else
                     {
-                        for ( unsigned j = 0; j < successorLayerSize; ++j )
+                        for ( unsigned j = 0; j < successorSize; ++j )
                         {
-                            // Find the index of the current neuron in the activation source list.
-                            unsigned predecessorIndex = 0;
+                            // Find the index of the current neuron in the successor's activation
+                            // sources list.
                             bool found = false;
+                            unsigned inputIndex = 0;
                             List<NeuronIndex> sources = successorLayer->getActivationSources( j );
                             for ( const auto &sourceIndex : sources )
                             {
-                                if ( sourceIndex != neuron )
-                                {
-                                    if ( !found )
-                                    {
-                                        ++predecessorIndex;
-                                    }
-                                }
-                                else
+                                if ( sourceIndex._layer == layerIndex && sourceIndex._neuron == i )
                                 {
                                     found = true;
+                                    break;
                                 }
+                                ++inputIndex;
                             }
-
+                            NeuronIndex successor( successorIndex, j );
                             if ( found )
                             {
                                 if ( !successorLayer->neuronEliminated( j ) )
                                 {
-                                    if ( mu[successorLayerIndex][j] >= 0 )
+                                    // When branching selected neurons, use predecessor symbolic
+                                    // bounds for the current branch.
+                                    if ( neuronToBranchIndex.exists( successor ) )
                                     {
                                         muHat[layerIndex][i] +=
-                                            mu[successorLayerIndex][j] *
-                                            getPredecessorSymbolicUb( successorLayerIndex )
-                                                [successorLayerSize * predecessorIndex + j];
+                                            std::max( mu[successorIndex][j], 0.0 ) *
+                                            getSymbolicUbPerBranch(
+                                                successor )[neuronToBranchIndex[successor]];
+
+                                        muHat[layerIndex][i] -=
+                                            std::max( -mu[successorIndex][j], 0.0 ) *
+                                            getSymbolicLbPerBranch(
+                                                successor )[neuronToBranchIndex[successor]];
                                     }
                                     else
                                     {
+                                        muHat[layerIndex][i] +=
+                                            std::max( mu[successorIndex][j], 0.0 ) *
+                                            getPredecessorSymbolicUb(
+                                                successorIndex )[successorSize * inputIndex + j];
+
                                         muHat[layerIndex][i] -=
-                                            mu[successorLayerIndex][j] *
-                                            getPredecessorSymbolicLb( successorLayerIndex )
-                                                [successorLayerSize * predecessorIndex + j];
+                                            std::max( -mu[successorIndex][j], 0.0 ) *
+                                            getPredecessorSymbolicLb(
+                                                successorIndex )[successorSize * inputIndex + j];
                                     }
                                 }
                             }
@@ -1385,22 +2039,16 @@ double NetworkLevelReasoner::getParameterisdPolygonalTighteningLowerBound(
         if ( layerIndex > 0 )
        {
             // Compute mu from muHat.
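The weighted-sum case above is a transposed matrix-vector product: muHat of a predecessor neuron accumulates mu of each successor neuron times the connecting weight. A toy standalone sketch of just that step, with the same row-major weights[i * successorSize + j] indexing; the sizes and values are assumptions of this illustration.

#include <cassert>
#include <vector>

int main()
{
    const unsigned predSize = 2, succSize = 2;
    // weights[i * succSize + j]: weight from predecessor i to successor j.
    std::vector<double> weights = { 1.0, -1.0, 2.0, 0.0 };
    std::vector<double> mu = { 0.5, 3.0 }; // dual multipliers of the successor layer
    std::vector<double> muHat( predSize, 0.0 );
    for ( unsigned i = 0; i < predSize; ++i )
        for ( unsigned j = 0; j < succSize; ++j )
            muHat[i] += mu[j] * weights[i * succSize + j];
    assert( muHat[0] == 0.5 * 1.0 + 3.0 * -1.0 ); // -2.5
    assert( muHat[1] == 0.5 * 2.0 + 3.0 * 0.0 );  // 1.0
    return 0;
}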
- mu[layerIndex] += muHat[layerIndex]; - for ( unsigned i = 0; i < layerSize; ++i ) + for ( unsigned i = 0; i < size; ++i ) { - mu[layerIndex][i] -= tightening.getCoeff( NeuronIndex( layerIndex, i ) ); + mu[layerIndex][i] += muHat[layerIndex][i] - + sign * tightening.getCoeff( NeuronIndex( layerIndex, i ) ); for ( unsigned j = 0; j < prevTigheningsCount; ++j ) { - const PolygonalTightening pt = prevTightenings[j]; + PolygonalTightening pt = prevTightenings[j]; double prevCoeff = pt.getCoeff( NeuronIndex( layerIndex, i ) ); - if ( pt._type == PolygonalTightening::LB ) - { - mu[layerIndex][i] -= gamma[j] * prevCoeff; - } - else - { - mu[layerIndex][i] += gamma[j] * prevCoeff; - } + double currentSign = ( pt._type == PolygonalTightening::LB ? 1 : -1 ); + mu[layerIndex][i] += currentSign * gamma[j] * prevCoeff; } } } @@ -1410,289 +2058,102 @@ double NetworkLevelReasoner::getParameterisdPolygonalTighteningLowerBound( Vector inputLayerBound( inputLayerSize, 0 ); for ( unsigned i = 0; i < inputLayerSize; ++i ) { - inputLayerBound[i] += tightening.getCoeff( NeuronIndex( 0, i ) ) - muHat[0][i]; + inputLayerBound[i] += sign * tightening.getCoeff( NeuronIndex( 0, i ) ) - muHat[0][i]; for ( unsigned j = 0; j < prevTigheningsCount; ++j ) { - const PolygonalTightening pt = prevTightenings[j]; + PolygonalTightening pt = prevTightenings[j]; double prevCoeff = pt.getCoeff( NeuronIndex( 0, i ) ); - if ( pt._type == PolygonalTightening::LB ) - { - inputLayerBound[i] += gamma[j] * prevCoeff; - } - else - { - inputLayerBound[i] -= gamma[j] * prevCoeff; - } + double currentSign = ( pt._type == PolygonalTightening::LB ? 1 : -1 ); + inputLayerBound[i] -= currentSign * gamma[j] * prevCoeff; } } - // Compute lower bound for polygonal tightening bias using mu and inputLayerBound. - double lowerBound = 0; + // Compute bound for polygonal tightening bias using mu and inputLayerBound. + double bound = 0; for ( unsigned i = 0; i < prevTigheningsCount; ++i ) { - const PolygonalTightening pt = prevTightenings[i]; - if ( pt._type == PolygonalTightening::LB ) - { - lowerBound -= gamma[i] * pt._value; - } - else - { - lowerBound += gamma[i] * pt._value; - } + PolygonalTightening pt = prevTightenings[i]; + double currentSign = ( pt._type == PolygonalTightening::LB ? 
1 : -1 ); + bound += currentSign * gamma[i] * pt._value; } - - for ( int index = maxLayer; index > 0; --index ) + for ( unsigned layerIndex = maxLayer; layerIndex >= 1; --layerIndex ) { - Layer *layer = _layerIndexToLayer[index]; - const unsigned layerSize = layer->getSize(); - const unsigned layerIndex = layer->getLayerIndex(); - + Layer *layer = _layerIndexToLayer[layerIndex]; if ( layer->getLayerType() == Layer::WEIGHTED_SUM ) { const double *biases = layer->getBiases(); - for ( unsigned i = 0; i < layerSize; ++i ) + for ( unsigned i = 0; i < layer->getSize(); ++i ) { if ( !layer->neuronEliminated( i ) ) { - lowerBound -= mu[layerIndex][i] * biases[i]; + bound -= mu[layerIndex][i] * biases[i]; } else { - lowerBound -= mu[layerIndex][i] * layer->getEliminatedNeuronValue( i ); + bound -= mu[layerIndex][i] * layer->getEliminatedNeuronValue( i ); } } } else { - for ( unsigned i = 0; i < layerSize; ++i ) + for ( unsigned i = 0; i < layer->getSize(); ++i ) { if ( !layer->neuronEliminated( i ) ) { - if ( mu[layerIndex][i] > 0 ) + NeuronIndex index( layerIndex, i ); + if ( neuronToBranchIndex.exists( index ) ) { - lowerBound -= - mu[layerIndex][i] * getPredecessorSymbolicUpperBias( layerIndex )[i]; + bound -= std::max( mu[layerIndex][i], 0.0 ) * + getSymbolicUpperBiasPerBranch( index )[neuronToBranchIndex[index]]; + bound += std::max( -mu[layerIndex][i], 0.0 ) * + getSymbolicLowerBiasPerBranch( index )[neuronToBranchIndex[index]]; } else { - lowerBound += - mu[layerIndex][i] * getPredecessorSymbolicLowerBias( layerIndex )[i]; + bound -= std::max( mu[layerIndex][i], 0.0 ) * + getPredecessorSymbolicUpperBias( layerIndex )[i]; + bound += std::max( -mu[layerIndex][i], 0.0 ) * + getPredecessorSymbolicLowerBias( layerIndex )[i]; } } else { - lowerBound -= + bound -= FloatUtils::abs( mu[layerIndex][i] ) * layer->getEliminatedNeuronValue( i ); } } } } - Layer *inputLayer = _layerIndexToLayer[0]; - const double *inputLbs = inputLayer->getLbs(); - const double *inputUbs = inputLayer->getUbs(); for ( unsigned i = 0; i < inputLayerSize; ++i ) { - if ( inputLayerBound[i] > 0 ) - { - lowerBound += inputLayerBound[i] * inputUbs[i]; - } - else - { - lowerBound += inputLayerBound[i] * inputLbs[i]; - } + bound += std::max( inputLayerBound[i], 0.0 ) * inputLayer->getLb( i ); + bound -= std::max( -inputLayerBound[i], 0.0 ) * inputLayer->getUb( i ); } - return lowerBound; + return sign * bound; } -const Vector NetworkLevelReasoner::generatePolygonalTightenings() -{ - if ( Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM || - Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT || - Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS ) - { - return generatePolygonalTighteningsForPMNR(); - } - else if ( Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP ) - { - return generatePolygonalTighteningsForInvprop(); - } - - const Vector tighteningsVector = Vector( {} ); - return tighteningsVector; -} - -const Vector NetworkLevelReasoner::generatePolygonalTighteningsForPMNR() -{ - Vector tightenings = Vector( {} ); - Vector lowerTightenings = Vector( {} ); - Vector upperTightenings = Vector( {} ); - const Vector constraints = selectConstraints(); - unsigned neuronCount = constraints.size(); - - for ( const auto &pair : constraints ) - { - unsigned layerIndex = 
pair._layer; - unsigned neuron = pair._neuron; - Layer *layer = _layerIndexToLayer[layerIndex]; - unsigned size = layer->getSize(); - - Map neuronToLowerCoeff; - Map neuronToUpperCoeff; - neuronToLowerCoeff[pair] = -1; - neuronToUpperCoeff[pair] = -1; - - List sources = layer->getActivationSources( neuron ); - unsigned predecessorIndex = 0; - for ( const auto &sourceIndex : sources ) - { - neuronToLowerCoeff[sourceIndex] = - getPredecessorSymbolicLb( layerIndex )[predecessorIndex * size + neuron]; - neuronToUpperCoeff[sourceIndex] = - getPredecessorSymbolicUb( layerIndex )[predecessorIndex * size + neuron]; - ++predecessorIndex; - } - PolygonalTightening lowerTightening( neuronToLowerCoeff, 0, PolygonalTightening::UB ); - PolygonalTightening upperTightening( neuronToLowerCoeff, 0, PolygonalTightening::LB ); - lowerTightenings.append( lowerTightening ); - upperTightenings.append( upperTightening ); - } - - int range = ( 1 << neuronCount ) - 1; - for ( int i = 0; i < range; ++i ) - { - Map neuronToLowerCoeff; - Map neuronToUpperCoeff; - for ( unsigned j = 0; j < neuronCount; ++j ) - { - int flag = ( i >> j ) % 2; - if ( flag > 0 ) - { - for ( const auto &pair : lowerTightenings[j]._neuronToCoefficient ) - { - neuronToLowerCoeff[pair.first] = pair.second; - } - for ( const auto &pair : upperTightenings[j]._neuronToCoefficient ) - { - neuronToUpperCoeff[pair.first] = pair.second; - } - } - } - PolygonalTightening lowerTightening( neuronToLowerCoeff, 0, PolygonalTightening::UB ); - PolygonalTightening upperTightening( neuronToLowerCoeff, 0, PolygonalTightening::LB ); - tightenings.append( lowerTightening ); - tightenings.append( upperTightening ); - } - const Vector tighteningsVector = - Vector( tightenings ); - return tighteningsVector; -} - -const Vector NetworkLevelReasoner::generatePolygonalTighteningsForInvprop() -{ - Vector tightenings = Vector( {} ); - for ( const auto &pair : _layerIndexToLayer ) - { - Layer *layer = pair.second; - const Vector nonFixedNeurons = getNonFixedNeurons( layer ); - for ( const auto &index : nonFixedNeurons ) - { - Map neuronToLowerCoeff; - Map neuronToUpperCoeff; - neuronToLowerCoeff[index] = 1; - neuronToUpperCoeff[index] = 1; - PolygonalTightening lowerTightening( - neuronToLowerCoeff, layer->getUb( index._neuron ), PolygonalTightening::UB ); - PolygonalTightening upperTightening( - neuronToLowerCoeff, layer->getLb( index._neuron ), PolygonalTightening::LB ); - tightenings.append( lowerTightening ); - tightenings.append( upperTightening ); - } - } - const Vector tighteningsVector = - Vector( tightenings ); - return tighteningsVector; -} - -const Vector NetworkLevelReasoner::selectConstraints() -{ - double maxScore = 0; - unsigned maxScoreIndex = 0; - for ( const auto &pair : _layerIndexToLayer ) - { - double layerScore = 0; - unsigned layerIndex = pair.first; - Layer *layer = pair.second; - const Vector nonFixedNeurons = getNonFixedNeurons( layer ); - for ( const auto &index : nonFixedNeurons ) - { - double neuronScore = getPMNRScore( index ); - layerScore += neuronScore; - } - - if ( layerScore > maxScore ) - { - maxScore = layerScore; - maxScoreIndex = layerIndex; - } - } - - Layer *layer = _layerIndexToLayer[maxScoreIndex]; - std::priority_queue, - std::vector>, - std::less>> - maxQueue; - const Vector nonFixedNeurons = getNonFixedNeurons( layer ); - for ( const auto &index : nonFixedNeurons ) - { - maxQueue.push( std::pair( getPMNRScore( index ), index._neuron ) ); - } - - unsigned neuronCount = - std::min( GlobalConfiguration::PMNR_SELECTED_NEURONS, 
nonFixedNeurons.size() );
-    Vector<NeuronIndex> neuronVector = Vector<NeuronIndex>( neuronCount );
-    for ( unsigned i = 0; i < neuronCount; ++i )
-    {
-        unsigned neuron = maxQueue.top().second;
-        NeuronIndex idx = NeuronIndex( maxScoreIndex, neuron );
-        neuronVector[i] = idx;
-        maxQueue.pop();
-    }
-
-    const Vector<NeuronIndex> vect( neuronVector );
-    return vect;
-}
-
-void NetworkLevelReasoner::initializePMNRScoreMap()
+void NetworkLevelReasoner::initializePMNRScoreMap()
 {
+    // Clear PMNR score map.
     _neuronToPMNRScores.clear();
     for ( const auto &pair : _layerIndexToLayer )
     {
-        Layer *layer = pair.second;
-        const Vector<NeuronIndex> nonFixedNeurons = getNonFixedNeurons( layer );
-        for ( const auto &index : nonFixedNeurons )
+        for ( const auto &index : pair.second->getNonfixedNeurons() )
         {
             _neuronToPMNRScores.insert( index, calculateNeuronPMNRScore( index ) );
         }
     }
 }

-double NetworkLevelReasoner::calculateNeuronPMNRScore( NeuronIndex index ) const
+double NetworkLevelReasoner::calculateNeuronPMNRScore( NeuronIndex index )
 {
-    ASSERT( isNeuronNonFixed( index ) );
+    ASSERT( _layerIndexToLayer[index._layer]->neuronNonfixed( index._neuron ) );
+
     if ( Options::get()->getMILPSolverBoundTighteningType() ==
-         MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM )
-    {
-        return 0;
-    }
-    else if ( Options::get()->getMILPSolverBoundTighteningType() ==
-              MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT )
+         MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT )
     {
         return calculatePMNRGradientScore( index );
     }
@@ -1704,401 +2165,1458 @@ double NetworkLevelReasoner::calculateNeuronPMNRScore( NeuronIndex index ) const
     return 0;
 }

-double NetworkLevelReasoner::calculatePMNRGradientScore( NeuronIndex index ) const
+double NetworkLevelReasoner::calculatePMNRGradientScore( NeuronIndex index )
 {
     double score = 0;
     unsigned neuron = index._neuron;
     unsigned layerIndex = index._layer;
     unsigned outputLayerSize = _layerIndexToLayer[getNumberOfLayers() - 1]->getSize();
-    for ( unsigned j = 0; j < outputLayerSize; ++j )
+
+    // Given a neuron's upper and lower symbolic bounds, its gradient vector is estimated
+    // as the average of its symbolic weights; the Gradient heuristic score is the squared
+    // norm of this estimate.
+    for ( unsigned i = 0; i < outputLayerSize; ++i )
     {
-        score += std::pow( ( getOutputSymbolicLb( layerIndex )[j * outputLayerSize + neuron] +
-                             getOutputSymbolicUb( layerIndex )[j * outputLayerSize + neuron] ) /
+        score += std::pow( ( getOutputSymbolicLb( layerIndex )[neuron * outputLayerSize + i] +
+                             getOutputSymbolicUb( layerIndex )[neuron * outputLayerSize + i] ) /
                                2.0,
                            2 );
     }
     return score;
 }

-double NetworkLevelReasoner::calculatePMNRBBPSScore( NeuronIndex index ) const
-{
-    return index._neuron * 3.5; // ...
-}
-
-void NetworkLevelReasoner::initializeBBPSBranchingMap()
+double NetworkLevelReasoner::calculatePMNRBBPSScore( NeuronIndex index )
 {
-    _neuronToBBPSBranchingPoints.clear();
-    for ( const auto &pair : _layerIndexToLayer )
-    {
-        Layer *layer = pair.second;
-        const Vector<NeuronIndex> nonFixedNeurons = getNonFixedNeurons( layer );
-        for ( const auto &index : nonFixedNeurons )
-        {
-            _neuronToBBPSBranchingPoints.insert( index, calculateBranchingPoint( index ) );
-        }
-    }
-}
+    // Initialize BBPS branching points and branch symbolic bound maps.
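+    // In outline, the BBPS score of a neuron is obtained by: (1) selecting a
+    // branching point, (2) deriving per-branch symbolic bounds of the neuron in
+    // terms of a single source neuron, (3) substituting these into the output
+    // layer's symbolic bounds and measuring the concrete-bound improvement on
+    // each branch, and (4) averaging the branch scores.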
+ initializeBBPSBranchingMaps(); -Map NetworkLevelReasoner::calculateBranchingPoint( NeuronIndex index ) const -{ - ASSERT( isNeuronNonFixed( index ) ); + Layer *outputLayer = _layerIndexToLayer[getNumberOfLayers() - 1]; + unsigned outputLayerSize = outputLayer->getSize(); unsigned layerIndex = index._layer; unsigned neuron = index._neuron; Layer *layer = _layerIndexToLayer[layerIndex]; - Map point; - - double lb = layer->getLb( neuron ); - double ub = layer->getUb( neuron ); - double branchingPoint; - if ( layer->getLayerType() == Layer::RELU || layer->getLayerType() == Layer::LEAKY_RELU || - layer->getLayerType() == Layer::SIGN || layer->getLayerType() == Layer::ABSOLUTE_VALUE ) - { - branchingPoint = 0; - } - else + // We have the symbolic bounds map of the output layer in terms of the given neuron's layer. + // Concretize all neurons except from the given neuron. + Vector concretizedOutputSymbolicLb( outputLayerSize, 0 ); + Vector concretizedOutputSymbolicUb( outputLayerSize, 0 ); + Vector concretizedOutputSymbolicLowerBias( outputLayerSize, 0 ); + Vector concretizedOutputSymbolicUpperBias( outputLayerSize, 0 ); + for ( unsigned i = 0; i < outputLayerSize; ++i ) { - branchingPoint = ( lb + ub ) / 2.0; + concretizedOutputSymbolicLb[i] = + getOutputSymbolicLb( layerIndex )[outputLayerSize * neuron + i]; + concretizedOutputSymbolicUb[i] = + getOutputSymbolicUb( layerIndex )[outputLayerSize * neuron + i]; + concretizedOutputSymbolicLowerBias[i] = getOutputSymbolicLowerBias( layerIndex )[i]; + concretizedOutputSymbolicUpperBias[i] = getOutputSymbolicUpperBias( layerIndex )[i]; + + for ( unsigned j = 0; j < layer->getSize(); ++j ) + { + if ( j != neuron ) + { + double lowerWeight = getOutputSymbolicLb( layerIndex )[outputLayerSize * j + i]; + double upperWeight = getOutputSymbolicUb( layerIndex )[outputLayerSize * j + i]; + concretizedOutputSymbolicLowerBias[i] += lowerWeight > 0 + ? lowerWeight * layer->getLb( j ) + : lowerWeight * layer->getUb( j ); + concretizedOutputSymbolicUpperBias[i] += upperWeight > 0 + ? upperWeight * layer->getUb( j ) + : upperWeight * layer->getLb( j ); + } + } } - branchingPoint = branchingPoint; + // For every branch, we calculated the output layer's symbolic bounds in terms of the given + // neuron, and branch symbolic bounds of this neuron in terms of one source neuron. + std::pair point = getBBPSBranchingPoint( index ); + NeuronIndex sourceIndex = point.first; + double value = point.second; + const Layer *sourceLayer = _layerIndexToLayer[sourceIndex._layer]; + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); + const Vector values = Vector( { sourceLb, value, sourceUb } ); + + Vector symbolicLbPerBranch = getSymbolicLbPerBranch( index ); + Vector symbolicUbPerBranch = getSymbolicUbPerBranch( index ); + Vector symbolicLowerBiasPerBranch = getSymbolicLowerBiasPerBranch( index ); + Vector symbolicUpperBiasPerBranch = getSymbolicUpperBiasPerBranch( index ); + + unsigned branchCount = values.size() - 1; + ASSERT( symbolicLbPerBranch.size() == branchCount ); + ASSERT( symbolicUbPerBranch.size() == branchCount ); + ASSERT( symbolicLowerBiasPerBranch.size() == branchCount ); + ASSERT( symbolicUpperBiasPerBranch.size() == branchCount ); + + Vector scores( branchCount, 0 ); + for ( unsigned i = 0; i < branchCount; ++i ) + { + // Substitute the branch symbolic bounds in the output symbolic bounds. 
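+        // Composition rule used below (illustration): if out <= c * x_f + d and
+        // x_f <= a_u * x_b + b_u, then for c > 0, out <= c * a_u * x_b + c * b_u + d;
+        // for c < 0 the lower branch bound is substituted instead. The lower bound
+        // is composed symmetrically.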
+ Vector sourceSymbolicLb( outputLayerSize, 0 ); + Vector sourceSymbolicUb( outputLayerSize, 0 ); + Vector sourceSymbolicLowerBias( outputLayerSize, 0 ); + Vector sourceSymbolicUpperBias( outputLayerSize, 0 ); + + for ( unsigned j = 0; j < outputLayerSize; ++j ) + { + sourceSymbolicLowerBias[j] += concretizedOutputSymbolicLowerBias[j]; + sourceSymbolicUpperBias[j] += concretizedOutputSymbolicUpperBias[j]; - return point; // ... -} + if ( concretizedOutputSymbolicLb[j] > 0 ) + { + sourceSymbolicLb[j] += concretizedOutputSymbolicLb[j] * symbolicLbPerBranch[i]; + sourceSymbolicLowerBias[j] += + concretizedOutputSymbolicLb[j] * symbolicLowerBiasPerBranch[i]; + } + else + { + sourceSymbolicLb[j] += concretizedOutputSymbolicLb[j] * symbolicUbPerBranch[i]; + sourceSymbolicLowerBias[j] += + concretizedOutputSymbolicLb[j] * symbolicUpperBiasPerBranch[i]; + } -const Map> -NetworkLevelReasoner::getParametersForLayers( const Vector &coeffs ) const -{ - unsigned totalCoeffsCount = getNumberOfParameters(); - ASSERT( coeffs.size() == totalCoeffsCount ); - unsigned index = 0; - Map> layerIndicesToParameters; - for ( const auto &pair : _layerIndexToLayer ) - { - unsigned layerIndex = pair.first; - Layer *layer = pair.second; - unsigned coeffsCount = getNumberOfParametersPerType( layer->getLayerType() ); - Vector currentCoeffs( coeffsCount ); - for ( unsigned i = 0; i < coeffsCount; ++i ) - { - currentCoeffs[i] = coeffs[index + i]; + if ( concretizedOutputSymbolicUb[j] > 0 ) + { + sourceSymbolicUb[j] += concretizedOutputSymbolicUb[j] * symbolicUbPerBranch[i]; + sourceSymbolicUpperBias[j] += + concretizedOutputSymbolicUb[j] * symbolicUpperBiasPerBranch[i]; + } + else + { + sourceSymbolicUb[j] += concretizedOutputSymbolicUb[j] * symbolicLbPerBranch[i]; + sourceSymbolicUpperBias[j] += + concretizedOutputSymbolicUb[j] * symbolicLowerBiasPerBranch[i]; + } + + // concretize the source neuron to get concrete bounds for every output neuron. + double concreteLb = + sourceSymbolicLb[j] > 0 + ? sourceSymbolicLb[j] * values[i] + sourceSymbolicLowerBias[j] + : sourceSymbolicLb[j] * values[i + 1] + sourceSymbolicLowerBias[j]; + double concreteUb = sourceSymbolicUb[j] > 0 + ? sourceSymbolicUb[j] * values[i + 1] + sourceSymbolicUpperBias[j] + : sourceSymbolicUb[j] * values[i] + sourceSymbolicUpperBias[j]; + + double outputLb = outputLayer->getLb( j ); + double outputUb = outputLayer->getUb( j ); + + // The branch's score is the improvement of concrete bounds over known DeepPoly bounds. + scores[i] += + std::max( outputUb - concreteUb, 0.0 ) + std::max( concreteLb - outputLb, 0.0 ); } - layerIndicesToParameters.insert( layerIndex, currentCoeffs ); - index += coeffsCount; } - const Map> parametersForLayers( layerIndicesToParameters ); - return parametersForLayers; -} -unsigned NetworkLevelReasoner::getNumberOfParameters() const -{ - unsigned num = 0; - for ( const auto &pair : _layerIndexToLayer ) + // The neuron's final PMNR-BBPS score is the average of its branch scores. 
+ double score = 0; + for ( unsigned i = 0; i < branchCount; ++i ) { - Layer *layer = pair.second; - num += getNumberOfParametersPerType( layer->getLayerType() ); + score += scores[i]; } - return num; + return score / branchCount; } -unsigned NetworkLevelReasoner::getNumberOfParametersPerType( Layer::Type t ) const +void NetworkLevelReasoner::initializeBBPSBranchingMaps() { - if ( t == Layer::RELU || t == Layer::LEAKY_RELU ) - return 1; - - if ( t == Layer::SIGN || t == Layer::BILINEAR ) - return 2; - - return 0; -} + // Clear BBPS branching points and branch symbolic bound maps. + _neuronToBBPSBranchingPoints.clear(); + _neuronToSymbolicLbPerBranch.clear(); + _neuronToSymbolicUbPerBranch.clear(); + _neuronToSymbolicLowerBiasPerBranch.clear(); + _neuronToSymbolicUpperBiasPerBranch.clear(); -const Vector NetworkLevelReasoner::getLayersWithNonFixedNeurons() const -{ - Vector layerWithNonFixedNeurons = Vector( {} ); + // Calculate branching points, symbolic bounds for non-fixed neurons which support branching. for ( const auto &pair : _layerIndexToLayer ) { - unsigned layerIndex = pair.first; - Layer *layer = pair.second; - const Vector nonFixedNeurons = getNonFixedNeurons( layer ); - if ( nonFixedNeurons.size() > 0 ) + const Layer *layer = pair.second; + for ( const auto &index : layer->getNonfixedNeurons() ) { - layerWithNonFixedNeurons.append( layerIndex ); + if ( supportsInvpropBranching( layer->getLayerType() ) ) + { + std::pair point = calculateBranchingPoint( index ); + _neuronToBBPSBranchingPoints.insert( index, point ); + + NeuronIndex sourceIndex = point.first; + double value = point.second; + const Layer *sourceLayer = _layerIndexToLayer[sourceIndex._layer]; + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); + const Vector values = Vector( { sourceLb, value, sourceUb } ); + + unsigned branchCount = values.size() - 1; + Vector symbolicLbPerBranch = Vector( branchCount, 0 ); + Vector symbolicUbPerBranch = Vector( branchCount, 0 ); + Vector symbolicLowerBiasPerBranch = Vector( branchCount, 0 ); + Vector symbolicUpperBiasPerBranch = Vector( branchCount, 0 ); + + calculateSymbolicBoundsPerBranch( index, + sourceIndex, + values, + symbolicLbPerBranch, + symbolicUbPerBranch, + symbolicLowerBiasPerBranch, + symbolicUpperBiasPerBranch, + branchCount ); + + _neuronToSymbolicLbPerBranch.insert( index, symbolicLbPerBranch ); + _neuronToSymbolicUbPerBranch.insert( index, symbolicUbPerBranch ); + _neuronToSymbolicLowerBiasPerBranch.insert( index, symbolicLowerBiasPerBranch ); + _neuronToSymbolicUpperBiasPerBranch.insert( index, symbolicUpperBiasPerBranch ); + } } } - const Vector layerList = Vector( layerWithNonFixedNeurons ); - return layerList; } -const Vector NetworkLevelReasoner::getNonFixedNeurons( const Layer *layer ) const +const std::pair +NetworkLevelReasoner::calculateBranchingPoint( NeuronIndex index ) const { - Vector nonFixedNeurons = Vector( {} ); - unsigned size = layer->getSize(); - unsigned layerIndex = layer->getLayerIndex(); - for ( unsigned i = 0; i < size; ++i ) + const Layer *layer = _layerIndexToLayer[index._layer]; + unsigned neuron = index._neuron; + ASSERT( layer->neuronNonfixed( neuron ) ); + std::pair point; + + // Heuristically generate candidates for branching points. 
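+    // Candidate generation is type-specific: ReLU/LeakyReLU/Sign/Abs branch only
+    // at zero; Round branches at the rounding boundaries of the source's bounds;
+    // Sigmoid, Max, Softmax and Bilinear sample candidates uniformly inside the
+    // relevant source's [lb, ub] range.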
+ Vector> candidates = + generateBranchingPointCandidates( layer, neuron ); + unsigned numberOfCandidates = candidates.size(); + + if ( numberOfCandidates == 1 ) + { + point = candidates[0]; + } + else { - NeuronIndex index( layerIndex, i ); - if ( isNeuronNonFixed( index ) ) + Vector scores( numberOfCandidates, 0 ); + double minScore = FloatUtils::infinity(); + unsigned minScoreIndex = 0; + for ( unsigned i = 0; i < numberOfCandidates; ++i ) { - nonFixedNeurons.append( index ); + // Calculate branch symbolic bounds for every candidate. + NeuronIndex sourceIndex = candidates[i].first; + double value = candidates[i].second; + const Layer *sourceLayer = _layerIndexToLayer[sourceIndex._layer]; + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); + const Vector values = Vector( { sourceLb, value, sourceUb } ); + + unsigned branchCount = values.size() - 1; + Vector symbolicLbPerBranch = Vector( branchCount, 0 ); + Vector symbolicUbPerBranch = Vector( branchCount, 0 ); + Vector symbolicLowerBiasPerBranch = Vector( branchCount, 0 ); + Vector symbolicUpperBiasPerBranch = Vector( branchCount, 0 ); + calculateSymbolicBoundsPerBranch( index, + sourceIndex, + values, + symbolicLbPerBranch, + symbolicUbPerBranch, + symbolicLowerBiasPerBranch, + symbolicUpperBiasPerBranch, + branchCount ); + + // Select candidate which minimizes tightening loss. + scores[i] = calculateTighteningLoss( values, + symbolicLbPerBranch, + symbolicUbPerBranch, + symbolicLowerBiasPerBranch, + symbolicUpperBiasPerBranch, + branchCount ); + if ( scores[i] < minScore ) + { + minScore = scores[i]; + minScoreIndex = i; + } } + point = candidates[minScoreIndex]; } - const Vector neuronList = Vector( nonFixedNeurons ); - return neuronList; + + const std::pair branchingPoint( point ); + return branchingPoint; } -bool NetworkLevelReasoner::isNeuronNonFixed( NeuronIndex index ) const +const Vector> +NetworkLevelReasoner::generateBranchingPointCandidates( const Layer *layer, unsigned i ) const { - unsigned layerIndex = index._layer; - unsigned neuron = index._neuron; - Layer *layer = _layerIndexToLayer[layerIndex]; - if ( layer->neuronEliminated( neuron ) ) - { - return false; - } + ASSERT( layer->neuronNonfixed( i ) ); + Layer::Type type = layer->getLayerType(); - bool nonFixed = false; - switch ( layer->getLayerType() ) + switch ( type ) { case Layer::RELU: case Layer::LEAKY_RELU: + case Layer::SIGN: + case Layer::ABSOLUTE_VALUE: { - double lb = layer->getLb( neuron ); - double ub = layer->getUb( neuron ); - nonFixed = !FloatUtils::isPositive( lb ) && !FloatUtils::isZero( ub ); + return generateBranchingPointCandidatesAtZero( layer, i ); break; } - case Layer::SIGN: + case Layer::ROUND: { - double lb = layer->getLb( neuron ); - double ub = layer->getUb( neuron ); - nonFixed = FloatUtils::isNegative( lb ) && !FloatUtils::isNegative( ub ); + return generateBranchingPointCandidatesForRound( layer, i ); break; } - case Layer::ABSOLUTE_VALUE: + case Layer::SIGMOID: { - NeuronIndex sourceIndex = *layer->getActivationSources( neuron ).begin(); - const Layer *sourceLayer = getLayer( sourceIndex._layer ); - double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); - double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); - nonFixed = sourceLb < 0 && sourceUb > 0; + return generateBranchingPointCandidatesForSigmoid( layer, i ); break; } - case Layer::SIGMOID: + case Layer::MAX: { - NeuronIndex sourceIndex = *layer->getActivationSources( neuron ).begin(); - const 
Layer *sourceLayer = getLayer( sourceIndex._layer ); - double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); - double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); - nonFixed = !FloatUtils::areEqual( sourceLb, sourceUb ); + return generateBranchingPointCandidatesForMax( layer, i ); break; } - case Layer::ROUND: + case Layer::SOFTMAX: { - NeuronIndex sourceIndex = *layer->getActivationSources( neuron ).begin(); - const Layer *sourceLayer = getLayer( sourceIndex._layer ); - double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); - double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); - nonFixed = - !FloatUtils::areEqual( FloatUtils::round( sourceUb ), FloatUtils::round( sourceLb ) ); + return generateBranchingPointCandidatesForSoftmax( layer, i ); break; } - case Layer::MAX: + case Layer::BILINEAR: { - List sources = layer->getActivationSources( neuron ); - const Layer *sourceLayer = getLayer( sources.begin()->_layer ); - NeuronIndex indexOfMaxLowerBound = *( sources.begin() ); - double maxLowerBound = FloatUtils::negativeInfinity(); - double maxUpperBound = FloatUtils::negativeInfinity(); - - Map sourceUbs; - for ( const auto &sourceIndex : sources ) - { - unsigned sourceNeuron = sourceIndex._neuron; - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - sourceUbs[sourceIndex] = sourceUb; - if ( maxLowerBound < sourceLb ) - { - indexOfMaxLowerBound = sourceIndex; - maxLowerBound = sourceLb; - } - if ( maxUpperBound < sourceUb ) - { - maxUpperBound = sourceUb; - } - } - - bool phaseFixed = true; - for ( const auto &sourceIndex : sources ) - { - if ( sourceIndex != indexOfMaxLowerBound && - FloatUtils::gt( sourceUbs[sourceIndex], maxLowerBound ) ) - { - phaseFixed = false; - break; - } - } - nonFixed = !phaseFixed; + return generateBranchingPointCandidatesForBilinear( layer, i ); break; } - case Layer::SOFTMAX: + default: { - List sources = layer->getActivationSources( neuron ); - const Layer *sourceLayer = getLayer( sources.begin()->_layer ); - Vector sourceLbs; - Vector sourceUbs; - Set handledInputNeurons; - for ( const auto &sourceIndex : sources ) + printf( "Error! Neuron type %u unsupported\n", type ); + throw MarabouError( MarabouError::NETWORK_LEVEL_REASONER_ACTIVATION_NOT_SUPPORTED ); + break; + } + } +} + +const Vector> +NetworkLevelReasoner::generateBranchingPointCandidatesAtZero( const Layer *layer, unsigned i ) const +{ + // A Relu/Sign/Abs/Leaky Relu activation is only branched at zero. + Vector> candidates; + NeuronIndex sourceIndex = *layer->getActivationSources( i ).begin(); + std::pair point( sourceIndex, 0 ); + candidates.append( point ); + const Vector> branchingPointCandidates( candidates ); + return branchingPointCandidates; +} + +const Vector> +NetworkLevelReasoner::generateBranchingPointCandidatesForRound( const Layer *layer, + unsigned i ) const +{ + // For a Round activation, the two candidates are selected as the highest value which + // rounds to the source's lb, and the lowest value which rounds to the source's ub. 
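+    // Worked example (illustration): with sourceLb = -1.3 and sourceUb = 2.6,
+    // round( -1.3 ) = -1 gives the lower candidate -0.5 - epsilon, and
+    // round( 2.6 ) = 3 gives the upper candidate 2.5 + epsilon.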
+ Vector> candidates; + NeuronIndex sourceIndex = *layer->getActivationSources( i ).begin(); + const Layer *sourceLayer = _layerIndexToLayer[sourceIndex._layer]; + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); + std::pair pointLower( + sourceIndex, + FloatUtils::round( sourceLb ) + 0.5 - + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + std::pair pointUpper( + sourceIndex, + FloatUtils::round( sourceUb ) - 0.5 + + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + candidates.append( pointLower ); + candidates.append( pointUpper ); + const Vector> branchingPointCandidates( candidates ); + return branchingPointCandidates; +} + +const Vector> +NetworkLevelReasoner::generateBranchingPointCandidatesForSigmoid( const Layer *layer, + unsigned i ) const +{ + // For a Sigmoid activation, sample candidates uniformly in [sourceLb, sourceUb]. + Vector> candidates; + NeuronIndex sourceIndex = *layer->getActivationSources( i ).begin(); + const Layer *sourceLayer = _layerIndexToLayer[sourceIndex._layer]; + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); + unsigned numberOfCandidates = GlobalConfiguration::PMNR_BBPS_BRANCHING_CANDIDATES; + for ( unsigned j = 0; j < numberOfCandidates; ++j ) + { + std::pair point( sourceIndex, + sourceLb + ( j + 1 ) * ( sourceUb - sourceLb ) / + ( numberOfCandidates + 1 ) ); + candidates.append( point ); + } + const Vector> branchingPointCandidates( candidates ); + return branchingPointCandidates; +} + +const Vector> +NetworkLevelReasoner::generateBranchingPointCandidatesForMax( const Layer *layer, unsigned i ) const +{ + // For a Max activation, calculate source index of largest lower bound + // and sample candidates uniformly in [sourceLb, sourceUb]. 
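+    // The uniform sampling used here (and for Sigmoid above) places candidate j at
+    // sourceLb + ( j + 1 ) * ( sourceUb - sourceLb ) / ( numberOfCandidates + 1 ),
+    // so, for instance, three candidates over [0, 4] land at 1, 2 and 3.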
+ Vector> candidates; + List sources = layer->getActivationSources( i ); + NeuronIndex indexOfMaxLowerBound = *( sources.begin() ); + double maxLowerBound = FloatUtils::negativeInfinity(); + for ( const auto &sourceIndex : sources ) + { + const Layer *sourceLayer = _layerIndexToLayer[sourceIndex._layer]; + unsigned sourceNeuron = sourceIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + if ( maxLowerBound < sourceLb ) { - unsigned sourceNeuron = sourceIndex._neuron; - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); - sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + indexOfMaxLowerBound = sourceIndex; + maxLowerBound = sourceLb; } + } - unsigned i = 0; - for ( const auto &sourceIndex : sources ) + const Layer *sourceLayer = _layerIndexToLayer[indexOfMaxLowerBound._layer]; + unsigned sourceNeuron = indexOfMaxLowerBound._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + unsigned numberOfCandidates = GlobalConfiguration::PMNR_BBPS_BRANCHING_CANDIDATES; + for ( unsigned j = 0; j < numberOfCandidates; ++j ) + { + std::pair point( indexOfMaxLowerBound, + sourceLb + ( j + 1 ) * ( sourceUb - sourceLb ) / + ( numberOfCandidates + 1 ) ); + candidates.append( point ); + } + const Vector> branchingPointCandidates( candidates ); + return branchingPointCandidates; +} + +const Vector> +NetworkLevelReasoner::generateBranchingPointCandidatesForSoftmax( const Layer *layer, + unsigned i ) const +{ + // For a Softmax activation, calculate this neuron's source index in the Softmax + // and sample candidates uniformly in [sourceLb, sourceUb]. 
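+    // The scan below pairs each preceding output neuron with one not-yet-handled
+    // source; the first source still unhandled when neuron i is reached is then
+    // taken to be this neuron's own input within the Softmax.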
+ Vector> candidates; + List sources = layer->getActivationSources( i ); + NeuronIndex selfIndex( 0, 0 ); + Set handledInputNeurons; + for ( unsigned j = 0; j < i; ++j ) + { + for ( const auto &sourceIndex : layer->getActivationSources( j ) ) { - if ( handledInputNeurons.exists( sourceIndex._neuron ) ) - ++i; - else + if ( !handledInputNeurons.exists( sourceIndex._neuron ) ) { handledInputNeurons.insert( sourceIndex._neuron ); break; } } - - double lb = - std::max( layer->getLb( neuron ), Layer::linearLowerBound( sourceLbs, sourceUbs, i ) ); - double ub = - std::max( layer->getUb( neuron ), Layer::linearUpperBound( sourceLbs, sourceUbs, i ) ); - nonFixed = !FloatUtils::areEqual( lb, ub ); - break; } - case Layer::BILINEAR: + for ( const auto &sourceIndex : sources ) { - List sources = layer->getActivationSources( neuron ); - const Layer *sourceLayer = getLayer( sources.begin()->_layer ); - bool eitherConstant = false; - for ( const auto &sourceIndex : sources ) + if ( !handledInputNeurons.exists( sourceIndex._neuron ) ) + { + selfIndex = sourceIndex; + break; + } + } + + const Layer *sourceLayer = _layerIndexToLayer[selfIndex._layer]; + unsigned sourceNeuron = selfIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + unsigned numberOfCandidates = GlobalConfiguration::PMNR_BBPS_BRANCHING_CANDIDATES; + for ( unsigned j = 0; j < numberOfCandidates; ++j ) + { + std::pair point( selfIndex, + sourceLb + ( j + 1 ) * ( sourceUb - sourceLb ) / + ( numberOfCandidates + 1 ) ); + candidates.append( point ); + } + const Vector> branchingPointCandidates( candidates ); + return branchingPointCandidates; +} + +const Vector> +NetworkLevelReasoner::generateBranchingPointCandidatesForBilinear( const Layer *layer, + unsigned i ) const +{ + // For a Bilinear activation, sample candidates uniformly from sources' [sourceLb, sourceUb]. 
+ Vector> candidates; + List sources = layer->getActivationSources( i ); + Vector sourceLbs; + Vector sourceUbs; + Vector sourceNeurons; + Vector sourceLayerIndices; + for ( const auto &sourceIndex : sources ) + { + const Layer *sourceLayer = _layerIndexToLayer[sourceIndex._layer]; + unsigned sourceNeuron = sourceIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + + sourceLayerIndices.append( sourceLayer->getLayerIndex() ); + sourceNeurons.append( sourceNeuron ); + sourceLbs.append( sourceLb ); + sourceUbs.append( sourceUb ); + } + + for ( unsigned j = 0; j < sources.size(); ++j ) + { + unsigned candidatesPerDimension = + GlobalConfiguration::PMNR_BBPS_BRANCHING_CANDIDATES / sources.size(); + for ( unsigned k = 0; k < candidatesPerDimension; ++k ) + { + std::pair point( + NeuronIndex( sourceLayerIndices[j], sourceNeurons[j] ), + sourceLbs[j] + + ( k + 1 ) * ( sourceUbs[j] - sourceLbs[j] ) / ( candidatesPerDimension + 1 ) ); + candidates.append( point ); + } + } + const Vector> branchingPointCandidates( candidates ); + return branchingPointCandidates; +} + +void NetworkLevelReasoner::calculateSymbolicBoundsPerBranch( + NeuronIndex index, + NeuronIndex sourceIndex, + const Vector &values, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch, + unsigned branchCount ) const +{ + ASSERT( symbolicLbPerBranch.size() == branchCount ); + ASSERT( symbolicUbPerBranch.size() == branchCount ); + ASSERT( symbolicLowerBiasPerBranch.size() == branchCount ); + ASSERT( symbolicUpperBiasPerBranch.size() == branchCount ); + ASSERT( values.size() == branchCount + 1 ); + + unsigned layerIndex = index._layer; + Layer *layer = _layerIndexToLayer[layerIndex]; + ASSERT( layer->neuronNonfixed( index._neuron ) ); + Layer::Type type = layer->getLayerType(); + + for ( unsigned i = 0; i < branchCount; ++i ) + { + switch ( type ) + { + case Layer::RELU: + calculateSymbolicBoundsPerBranchForRelu( i, + values[i], + values[i + 1], + symbolicLbPerBranch, + symbolicUbPerBranch, + symbolicLowerBiasPerBranch, + symbolicUpperBiasPerBranch ); + break; + + case Layer::ABSOLUTE_VALUE: + calculateSymbolicBoundsPerBranchForAbsoluteValue( i, + values[i], + values[i + 1], + symbolicLbPerBranch, + symbolicUbPerBranch, + symbolicLowerBiasPerBranch, + symbolicUpperBiasPerBranch ); + break; + + case Layer::SIGN: + calculateSymbolicBoundsPerBranchForSign( i, + values[i], + values[i + 1], + symbolicLbPerBranch, + symbolicUbPerBranch, + symbolicLowerBiasPerBranch, + symbolicUpperBiasPerBranch ); + break; + + case Layer::ROUND: + calculateSymbolicBoundsPerBranchForRound( i, + values[i], + values[i + 1], + symbolicLbPerBranch, + symbolicUbPerBranch, + symbolicLowerBiasPerBranch, + symbolicUpperBiasPerBranch ); + break; + + case Layer::SIGMOID: + calculateSymbolicBoundsPerBranchForSigmoid( i, + values[i], + values[i + 1], + symbolicLbPerBranch, + symbolicUbPerBranch, + symbolicLowerBiasPerBranch, + symbolicUpperBiasPerBranch ); + break; + + case Layer::LEAKY_RELU: + calculateSymbolicBoundsPerBranchForLeakyRelu( index, + i, + values[i], + values[i + 1], + symbolicLbPerBranch, + symbolicUbPerBranch, + symbolicLowerBiasPerBranch, + symbolicUpperBiasPerBranch ); + break; + + case Layer::MAX: + calculateSymbolicBoundsPerBranchForMax( index, + sourceIndex, + i, + values[i], + values[i + 1], + symbolicLbPerBranch, + symbolicUbPerBranch, + symbolicLowerBiasPerBranch, + symbolicUpperBiasPerBranch 
); + break; + + case Layer::SOFTMAX: + calculateSymbolicBoundsPerBranchForSoftmax( index, + sourceIndex, + i, + values[i], + values[i + 1], + symbolicLbPerBranch, + symbolicUbPerBranch, + symbolicLowerBiasPerBranch, + symbolicUpperBiasPerBranch ); + break; + + case Layer::BILINEAR: + calculateSymbolicBoundsPerBranchForBilinear( index, + sourceIndex, + i, + values[i], + values[i + 1], + symbolicLbPerBranch, + symbolicUbPerBranch, + symbolicLowerBiasPerBranch, + symbolicUpperBiasPerBranch ); + break; + + default: + { + printf( "Error! Neuron type %u unsupported\n", type ); + throw MarabouError( MarabouError::NETWORK_LEVEL_REASONER_ACTIVATION_NOT_SUPPORTED ); + break; + } + } + } +} + +void NetworkLevelReasoner::calculateSymbolicBoundsPerBranchForRelu( + unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const +{ + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // Phase active + // Symbolic bound: x_b <= x_f <= x_b + symbolicUbPerBranch[i] = 1; + symbolicUpperBiasPerBranch[i] = 0; + symbolicLbPerBranch[i] = 1; + symbolicLowerBiasPerBranch[i] = 0; + } + else if ( !FloatUtils::isPositive( sourceUb ) ) + { + // Phase inactive + // Symbolic bound: 0 <= x_f <= 0 + symbolicUbPerBranch[i] = 0; + symbolicUpperBiasPerBranch[i] = 0; + symbolicLbPerBranch[i] = 0; + symbolicLowerBiasPerBranch[i] = 0; + } + else + { + // ReLU not fixed + // Symbolic upper bound: x_f <= (x_b - l) * u / ( u - l) + double weight = sourceUb / ( sourceUb - sourceLb ); + symbolicUbPerBranch[i] = weight; + symbolicUpperBiasPerBranch[i] = -sourceLb * weight; + + // For the lower bound, in general, x_f >= lambda * x_b, where + // 0 <= lambda <= 1, would be a sound lower bound. We + // use the heuristic described in section 4.1 of + // https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + // to set the value of lambda (either 0 or 1 is considered). 
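+        // Worked example (illustration): for l = -1, u = 3 the upper bound is
+        // x_f <= 0.75 * x_b + 0.75; since u > -l, the heuristic picks lambda = 1
+        // ( x_f >= x_b ), and otherwise it picks lambda = 0 ( x_f >= 0 ).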
+ if ( sourceUb > -sourceLb ) + { + // lambda = 1 + // Symbolic lower bound: x_f >= x_b + symbolicLbPerBranch[i] = 1; + symbolicLowerBiasPerBranch[i] = 0; + } + else { - unsigned sourceNeuron = sourceIndex._neuron; - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) + // lambda = 1 + // Symbolic lower bound: x_f >= 0 + symbolicLbPerBranch[i] = 0; + symbolicLowerBiasPerBranch[i] = 0; + } + } +} + +void NetworkLevelReasoner::calculateSymbolicBoundsPerBranchForAbsoluteValue( + unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const +{ + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // Phase active + // Symbolic bound: x_b <= x_f <= x_b + symbolicUbPerBranch[i] = 1; + symbolicUpperBiasPerBranch[i] = 0; + symbolicLbPerBranch[i] = 1; + symbolicLowerBiasPerBranch[i] = 0; + } + else if ( !FloatUtils::isPositive( sourceUb ) ) + { + // Phase inactive + // Symbolic bound: -x_b <= x_f <= -x_b + symbolicUbPerBranch[i] = -1; + symbolicUpperBiasPerBranch[i] = 0; + symbolicLbPerBranch[i] = -1; + symbolicLowerBiasPerBranch[i] = 0; + } + else + { + // AbsoluteValue not fixed + // Naive concretization: 0 <= x_f <= max(-lb, ub) + symbolicUbPerBranch[i] = 0; + symbolicUpperBiasPerBranch[i] = FloatUtils::max( -sourceLb, sourceUb ); + + symbolicLbPerBranch[i] = 0; + symbolicLowerBiasPerBranch[i] = 0; + } +} + +void NetworkLevelReasoner::calculateSymbolicBoundsPerBranchForSign( + unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const +{ + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // Phase active + // Symbolic bound: 1 <= x_f <= 1 + symbolicUbPerBranch[i] = 0; + symbolicUpperBiasPerBranch[i] = 1; + symbolicLbPerBranch[i] = 0; + symbolicLowerBiasPerBranch[i] = 1; + } + else if ( !FloatUtils::isPositive( sourceUb ) ) + { + // Phase inactive + // Symbolic bound: -1 <= x_f <= -1 + symbolicUbPerBranch[i] = 0; + symbolicUpperBiasPerBranch[i] = -1; + symbolicLbPerBranch[i] = 0; + symbolicLowerBiasPerBranch[i] = -1; + } + else + { + // Sign not fixed + // Use the relaxation defined in https://arxiv.org/pdf/2011.02948.pdf + // Symbolic upper bound: x_f <= -2 / l * x_b + 1 + symbolicUbPerBranch[i] = -2 / sourceLb; + symbolicUpperBiasPerBranch[i] = 1; + + // Symbolic lower bound: x_f >= (2 / u) * x_b - 1 + symbolicLbPerBranch[i] = 2 / sourceUb; + symbolicLowerBiasPerBranch[i] = -1; + } +} + +void NetworkLevelReasoner::calculateSymbolicBoundsPerBranchForRound( + unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const +{ + double sourceUbRound = FloatUtils::round( sourceUb ); + double sourceLbRound = FloatUtils::round( sourceLb ); + + if ( sourceUbRound == sourceLbRound ) + { + symbolicUbPerBranch[i] = 0; + symbolicUpperBiasPerBranch[i] = sourceUbRound; + symbolicLbPerBranch[i] = 0; + symbolicLowerBiasPerBranch[i] = sourceLbRound; + } + else + { + // Round not fixed + // Symbolic upper bound: x_f <= x_b + 0.5 + symbolicUbPerBranch[i] = 1; + symbolicUpperBiasPerBranch[i] = 0.5; + + // Symbolic lower bound: x_f >= x_b - 0.5 + symbolicLbPerBranch[i] = 1; + symbolicLowerBiasPerBranch[i] = -0.5; + 
} +} + +void NetworkLevelReasoner::calculateSymbolicBoundsPerBranchForSigmoid( + unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const +{ + double sourceUbSigmoid = SigmoidConstraint::sigmoid( sourceUb ); + double sourceLbSigmoid = SigmoidConstraint::sigmoid( sourceLb ); + + if ( sourceUb == sourceLb ) + { + symbolicUbPerBranch[i] = 0; + symbolicUpperBiasPerBranch[i] = sourceUbSigmoid; + symbolicLbPerBranch[i] = 0; + symbolicLowerBiasPerBranch[i] = sourceLbSigmoid; + } + else + { + double lambda = ( sourceUbSigmoid - sourceLbSigmoid ) / ( sourceUb - sourceLb ); + double lambdaPrime = std::min( SigmoidConstraint::sigmoidDerivative( sourceLb ), + SigmoidConstraint::sigmoidDerivative( sourceUb ) ); + + // update lower bound + if ( FloatUtils::isPositive( sourceLb ) ) + { + symbolicLbPerBranch[i] = lambda; + symbolicLowerBiasPerBranch[i] = sourceLbSigmoid - lambda * sourceLb; + } + else + { + symbolicLbPerBranch[i] = lambdaPrime; + symbolicLowerBiasPerBranch[i] = sourceLbSigmoid - lambdaPrime * sourceLb; + } + + // update upper bound + if ( !FloatUtils::isPositive( sourceUb ) ) + { + symbolicUbPerBranch[i] = lambda; + symbolicUpperBiasPerBranch[i] = sourceUbSigmoid - lambda * sourceUb; + } + else + { + symbolicUbPerBranch[i] = lambdaPrime; + symbolicUpperBiasPerBranch[i] = sourceUbSigmoid - lambdaPrime * sourceUb; + } + } +} + +void NetworkLevelReasoner::calculateSymbolicBoundsPerBranchForLeakyRelu( + NeuronIndex index, + unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const +{ + double slope = _layerIndexToLayer[index._layer]->getAlpha(); + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // Phase active + // Symbolic bound: x_b <= x_f <= x_b + symbolicUbPerBranch[i] = 1; + symbolicUpperBiasPerBranch[i] = 0; + symbolicLbPerBranch[i] = 1; + symbolicLowerBiasPerBranch[i] = 0; + } + else if ( !FloatUtils::isPositive( sourceUb ) ) + { + // Phase inactive + // Symbolic bound: slope * x_b <= x_f <= slope * x_b + symbolicUbPerBranch[i] = slope; + symbolicUpperBiasPerBranch[i] = 0; + symbolicLbPerBranch[i] = slope; + symbolicLowerBiasPerBranch[i] = 0; + } + else + { + // LeakyReLU not fixed + // Symbolic upper bound: x_f <= (x_b - l) * u / ( u - l) + double width = sourceUb - sourceLb; + double weight = ( sourceUb - slope * sourceLb ) / width; + + if ( slope <= 1 ) + { + symbolicUbPerBranch[i] = weight; + symbolicUpperBiasPerBranch[i] = ( ( slope - 1 ) * sourceUb * sourceLb ) / width; + + // For the lower bound, in general, x_f >= lambda * x_b, where + // 0 <= lambda <= 1, would be a sound lower bound. We + // use the heuristic described in section 4.1 of + // https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + // to set the value of lambda (either 0 or 1 is considered). 
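+            // Worked example (illustration): for slope = 0.1, l = -1, u = 3 the
+            // upper bound is x_f <= 0.775 * x_b + 0.675; here the lower-bound
+            // lambda is effectively chosen between 1 and slope rather than
+            // between 1 and 0.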
+ if ( sourceUb > sourceLb ) { - eitherConstant = true; + // lambda = 1 + // Symbolic lower bound: x_f >= x_b + symbolicLbPerBranch[i] = 1; + symbolicLowerBiasPerBranch[i] = 0; } - if ( FloatUtils::areEqual( sourceLb, sourceUb ) ) + else { - eitherConstant = true; + // lambda = 1 + // Symbolic lower bound: x_f >= 0 + // Concrete lower bound: x_f >= 0 + symbolicLbPerBranch[i] = slope; + symbolicLowerBiasPerBranch[i] = 0; + } + } + else + { + symbolicLbPerBranch[i] = weight; + symbolicLowerBiasPerBranch[i] = ( ( slope - 1 ) * sourceUb * sourceLb ) / width; + + if ( sourceUb > sourceLb ) + { + symbolicUbPerBranch[i] = 1; + symbolicUpperBiasPerBranch[i] = 0; + } + else + { + symbolicUbPerBranch[i] = slope; + symbolicLowerBiasPerBranch[i] = 0; } } - nonFixed = !eitherConstant; - break; } - case Layer::WEIGHTED_SUM: - case Layer::INPUT: +} + +void NetworkLevelReasoner::calculateSymbolicBoundsPerBranchForMax( + NeuronIndex index, + NeuronIndex chosenSourceIndex, + unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const +{ + unsigned layerIndex = index._layer; + unsigned neuron = index._neuron; + Layer *layer = _layerIndexToLayer[layerIndex]; + List sources = layer->getActivationSources( neuron ); + + NeuronIndex indexOfMaxLowerBound = *( sources.begin() ); + double maxLowerBound = FloatUtils::negativeInfinity(); + double maxUpperBound = FloatUtils::negativeInfinity(); + + Map sourceLbs; + Map sourceUbs; + for ( const auto &sourceIndex : sources ) { - nonFixed = false; - break; + const Layer *sourceLayer = _layerIndexToLayer[sourceIndex._layer]; + unsigned sourceNeuron = sourceIndex._neuron; + double currentLb = + sourceIndex != chosenSourceIndex ? sourceLayer->getLb( sourceNeuron ) : sourceLb; + double currentUb = + sourceIndex != chosenSourceIndex ? sourceLayer->getUb( sourceNeuron ) : sourceUb; + sourceLbs[sourceIndex] = currentLb; + sourceUbs[sourceIndex] = currentUb; + + if ( maxLowerBound < currentLb ) + { + indexOfMaxLowerBound = sourceIndex; + maxLowerBound = currentLb; + } + if ( maxUpperBound < currentUb ) + { + maxUpperBound = currentUb; + } } - default: + + // The phase is fixed if the lower-bound of a source variable x_b is + // larger than the upper-bounds of the other source variables. + bool phaseFixed = true; + for ( const auto &sourceIndex : sources ) { - nonFixed = false; - break; + if ( sourceIndex != indexOfMaxLowerBound && + FloatUtils::gt( sourceUbs[sourceIndex], maxLowerBound ) ) + { + phaseFixed = false; + break; + } } + + if ( phaseFixed ) + { + // Phase fixed + // Symbolic bound: x_b <= x_f <= x_b + // Concretized bound (if chosenSourceIndex != indexOfMaxLowerBound): x_b.lb <= x_f <= + // x_b.ub. + if ( chosenSourceIndex != indexOfMaxLowerBound ) + { + symbolicLbPerBranch[i] = 0; + symbolicLowerBiasPerBranch[i] = sourceLbs[indexOfMaxLowerBound]; + symbolicUbPerBranch[i] = 0; + symbolicUpperBiasPerBranch[i] = sourceUbs[indexOfMaxLowerBound]; + } + else + { + symbolicLbPerBranch[i] = 1; + symbolicLowerBiasPerBranch[i] = 0; + symbolicUbPerBranch[i] = 1; + symbolicUpperBiasPerBranch[i] = 0; + } + } + else + { + // MaxPool not fixed + // Symbolic bounds: x_b <= x_f <= maxUpperBound + // Concretized bound (if chosenSourceIndex != indexOfMaxLowerBound): x_b.lb <= x_f <= + // maxUpperBound. 
+ if ( chosenSourceIndex != indexOfMaxLowerBound ) + { + symbolicLbPerBranch[i] = 0; + symbolicLowerBiasPerBranch[i] = sourceLbs[indexOfMaxLowerBound]; + symbolicUbPerBranch[i] = 0; + symbolicUpperBiasPerBranch[i] = maxUpperBound; + } + else + { + symbolicLbPerBranch[i] = 1; + symbolicLowerBiasPerBranch[i] = 0; + symbolicUbPerBranch[i] = 0; + symbolicUpperBiasPerBranch[i] = maxUpperBound; + } } - return nonFixed; } -void NetworkLevelReasoner::initializeSymbolicBoundsMaps( const Vector &coeffs ) +void NetworkLevelReasoner::calculateSymbolicBoundsPerBranchForSoftmax( + NeuronIndex index, + NeuronIndex chosenSourceIndex, + unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const { - // Clear the previous symbolic bound maps. - _outputSymbolicLb.clear(); - _outputSymbolicUb.clear(); - _outputSymbolicLowerBias.clear(); - _outputSymbolicUpperBias.clear(); + unsigned layerIndex = index._layer; + unsigned neuron = index._neuron; + Layer *layer = _layerIndexToLayer[layerIndex]; + List sources = layer->getActivationSources( neuron ); + Vector sourceLbs; + Vector sourceUbs; + Vector sourceMids; + Vector targetLbs; + Vector targetUbs; + for ( const auto &sourceIndex : sources ) + { + const Layer *sourceLayer = _layerIndexToLayer[sourceIndex._layer]; + unsigned sourceNeuron = sourceIndex._neuron; + double currentLb = + sourceIndex != chosenSourceIndex ? sourceLayer->getLb( sourceNeuron ) : sourceLb; + double currentUb = + sourceIndex != chosenSourceIndex ? sourceLayer->getUb( sourceNeuron ) : sourceUb; + sourceLbs.append( currentLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + sourceUbs.append( currentUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + sourceMids.append( ( currentLb + currentUb ) / 2 ); + targetLbs.append( layer->getLb( neuron ) ); + targetUbs.append( layer->getUb( neuron ) ); + } + + unsigned selfIndex = 0; + Set handledInputNeurons; + for ( unsigned i = 0; i < neuron; ++i ) + { + for ( const auto &sourceIndex : layer->getActivationSources( i ) ) + { + if ( !handledInputNeurons.exists( sourceIndex._neuron ) ) + { + handledInputNeurons.insert( sourceIndex._neuron ); + break; + } + } + } + for ( const auto &sourceIndex : sources ) + { + if ( handledInputNeurons.exists( sourceIndex._neuron ) ) + ++selfIndex; + else + { + break; + } + } - _predecessorSymbolicLb.clear(); - _predecessorSymbolicUb.clear(); - _predecessorSymbolicLowerBias.clear(); - _predecessorSymbolicUpperBias.clear(); + double lb = std::max( Layer::linearLowerBound( sourceLbs, sourceUbs, selfIndex ), + layer->getLb( neuron ) ); + double ub = std::min( Layer::linearUpperBound( sourceLbs, sourceUbs, selfIndex ), + layer->getUb( neuron ) ); + targetLbs[selfIndex] = lb; + targetUbs[selfIndex] = ub; - // Temporarily add weighted sum layer to the NLR of the same size of the output layer. - Layer *outputLayer = _layerIndexToLayer[getNumberOfLayers() - 1]; - unsigned outputLayerIndex = outputLayer->getLayerIndex(); - unsigned outputLayerSize = outputLayer->getSize(); - unsigned newLayerIndex = outputLayerIndex + 1; + if ( FloatUtils::areEqual( lb, ub ) ) + { + symbolicUbPerBranch[i] = 0; + symbolicUpperBiasPerBranch[i] = ub; + symbolicLbPerBranch[i] = 0; + symbolicLowerBiasPerBranch[i] = lb; + } + else + { + // Compute Softmax symbolic bound. Neurons other than given source neuron are concretized. 
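+        // For reference: softmax_i( x ) = exp( x_i - LSE( x ) ) with
+        // LSE( x ) = log( sum_j exp( x_j ) ), and equivalently
+        // softmax_i( x ) = 1 / sum_j exp( x_j - x_i ); the two bound types below
+        // appear to linearize these two decompositions respectively.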
+ if ( Options::get()->getSoftmaxBoundType() == SoftmaxBoundType::LOG_SUM_EXP_DECOMPOSITION ) + { + bool useLSE2 = false; + for ( const auto &lb : targetLbs ) + { + if ( lb > GlobalConfiguration::SOFTMAX_LSE2_THRESHOLD ) + useLSE2 = true; + } + unsigned inputIndex = 0; + if ( !useLSE2 ) + { + symbolicLowerBiasPerBranch[i] = + Layer::LSELowerBound( sourceMids, sourceLbs, sourceUbs, selfIndex ); + for ( const auto &sourceIndex : sources ) + { + const Layer *sourceLayer = _layerIndexToLayer[sourceIndex._layer]; + double dldj = Layer::dLSELowerBound( + sourceMids, sourceLbs, sourceUbs, selfIndex, inputIndex ); + if ( sourceIndex != chosenSourceIndex ) + { + double concretizedLowerBias = + dldj > 0 ? dldj * sourceLayer->getLb( sourceIndex._neuron ) + : dldj * sourceLayer->getUb( sourceIndex._neuron ); + symbolicLowerBiasPerBranch[i] += concretizedLowerBias; + } + else + { + symbolicLbPerBranch[i] = dldj; + } + symbolicLowerBiasPerBranch[i] -= dldj * sourceMids[inputIndex]; + ++inputIndex; + } + } + else + { + symbolicLowerBiasPerBranch[i] = + Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, selfIndex ); + for ( const auto &sourceIndex : sources ) + { + const Layer *sourceLayer = _layerIndexToLayer[sourceIndex._layer]; + double dldj = Layer::dLSELowerBound2( + sourceMids, sourceLbs, sourceUbs, selfIndex, inputIndex ); + if ( sourceIndex != chosenSourceIndex ) + { + double concretizedLowerBias = + dldj > 0 ? dldj * sourceLayer->getLb( sourceIndex._neuron ) + : dldj * sourceLayer->getUb( sourceIndex._neuron ); + symbolicLowerBiasPerBranch[i] += concretizedLowerBias; + } + else + { + symbolicLbPerBranch[i] = dldj; + } + symbolicLowerBiasPerBranch[i] -= dldj * sourceMids[inputIndex]; + ++inputIndex; + } + } - addLayer( newLayerIndex, Layer::WEIGHTED_SUM, outputLayerSize ); - addLayerDependency( outputLayerIndex, newLayerIndex ); - Layer *newLayer = _layerIndexToLayer[newLayerIndex]; + symbolicUpperBiasPerBranch[i] = + Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, selfIndex ); + inputIndex = 0; + for ( const auto &sourceIndex : sources ) + { + const Layer *sourceLayer = _layerIndexToLayer[sourceIndex._layer]; + double dudj = Layer::dLSEUpperbound( + sourceMids, targetLbs, targetUbs, selfIndex, inputIndex ); + if ( sourceIndex != chosenSourceIndex ) + { + double concretizedUpperBias = + dudj > 0 ? dudj * sourceLayer->getUb( sourceIndex._neuron ) + : dudj * sourceLayer->getLb( sourceIndex._neuron ); + symbolicUpperBiasPerBranch[i] += concretizedUpperBias; + } + else + { + symbolicUbPerBranch[i] = dudj; + } + symbolicUpperBiasPerBranch[i] -= dudj * sourceMids[inputIndex]; + ++inputIndex; + } + } + else if ( Options::get()->getSoftmaxBoundType() == + SoftmaxBoundType::EXPONENTIAL_RECIPROCAL_DECOMPOSITION ) + { + symbolicLowerBiasPerBranch[i] = + Layer::ERLowerBound( sourceMids, sourceLbs, sourceUbs, selfIndex ); + unsigned inputIndex = 0; + for ( const auto &sourceIndex : sources ) + { + const Layer *sourceLayer = _layerIndexToLayer[sourceIndex._layer]; + double dldj = + Layer::dERLowerBound( sourceMids, sourceLbs, sourceUbs, selfIndex, inputIndex ); + if ( sourceIndex != chosenSourceIndex ) + { + double concretizedLowerBias = + dldj > 0 ? 
dldj * sourceLayer->getLb( sourceIndex._neuron ) + : dldj * sourceLayer->getUb( sourceIndex._neuron ); + symbolicLowerBiasPerBranch[i] += concretizedLowerBias; + } + else + { + symbolicLbPerBranch[i] = dldj; + } + symbolicLowerBiasPerBranch[i] -= dldj * sourceMids[inputIndex]; + ++inputIndex; + } - for ( unsigned i = 0; i < outputLayerSize; ++i ) + symbolicUpperBiasPerBranch[i] = + Layer::ERUpperBound( sourceMids, targetLbs, targetUbs, selfIndex ); + inputIndex = 0; + for ( const auto &sourceIndex : sources ) + { + const Layer *sourceLayer = _layerIndexToLayer[sourceIndex._layer]; + double dudj = + Layer::dERUpperBound( sourceMids, targetLbs, targetUbs, selfIndex, inputIndex ); + if ( sourceIndex != chosenSourceIndex ) + { + double concretizedUpperBias = + dudj > 0 ? dudj * sourceLayer->getUb( sourceIndex._neuron ) + : dudj * sourceLayer->getLb( sourceIndex._neuron ); + symbolicUpperBiasPerBranch[i] += concretizedUpperBias; + } + else + { + symbolicUbPerBranch[i] = dudj; + } + symbolicUpperBiasPerBranch[i] -= dudj * sourceMids[inputIndex]; + ++inputIndex; + } + } + } +} +void NetworkLevelReasoner::calculateSymbolicBoundsPerBranchForBilinear( + NeuronIndex index, + NeuronIndex chosenSourceIndex, + unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const +{ + unsigned layerIndex = index._layer; + unsigned neuron = index._neuron; + Layer *layer = _layerIndexToLayer[layerIndex]; + List sources = layer->getActivationSources( neuron ); + + Vector sourceLbs; + Vector sourceUbs; + Vector sourceValues; + Vector sourceNeurons; + Vector sourceLayerSizes; + Vector sourceLayers; + bool allConstant = true; + for ( const auto &sourceIndex : sources ) + { + const Layer *sourceLayer = _layerIndexToLayer[sourceIndex._layer]; + unsigned sourceNeuron = sourceIndex._neuron; + double currentLb = + sourceIndex != chosenSourceIndex ? sourceLayer->getLb( sourceNeuron ) : sourceLb; + double currentUb = + sourceIndex != chosenSourceIndex ? sourceLayer->getUb( sourceNeuron ) : sourceUb; + + sourceLayers.append( sourceLayer ); + sourceNeurons.append( sourceIndex ); + sourceLbs.append( currentLb ); + sourceUbs.append( currentUb ); + + if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) + { + allConstant = false; + } + else + { + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + sourceValues.append( sourceValue ); + } + } + + if ( allConstant ) + { + // If the both source neurons have been eliminated, this neuron is constant + symbolicUbPerBranch[i] = 0; + symbolicLbPerBranch[i] = 0; + symbolicUpperBiasPerBranch[i] = sourceValues[0] * sourceValues[1]; + symbolicLowerBiasPerBranch[i] = sourceValues[0] * sourceValues[1]; + } + else { - setWeight( outputLayerIndex, i, newLayerIndex, i, 1 ); - newLayer->setLb( i, FloatUtils::infinity() ); - newLayer->setUb( i, FloatUtils::negativeInfinity() ); + // Symbolic lower bound: + // out >= alpha * x + beta * y + gamma + // where alpha = lb_y, beta = lb_x, gamma = -lb_x * lb_y. + + // Symbolic upper bound: + // out <= alpha * x + beta * y + gamma + // where alpha = ub_y, beta = lb_x, gamma = -lb_x * ub_y. + + // Neuron other than given source neuron is concretized. + + double aLower = sourceLbs[1]; + double aUpper = sourceUbs[1]; + double bLower = sourceLbs[0]; + double bUpper = sourceLbs[0]; + if ( sourceNeurons[0] != chosenSourceIndex ) + { + double concretizedLowerBias = + aLower > 0 ? 
-    // Initialize maps with zero vectors.
-    unsigned maxSize = getMaxLayerSize();
+double
+NetworkLevelReasoner::calculateTighteningLoss( const Vector<double> &values,
+                                               const Vector<double> &symbolicLbPerBranch,
+                                               const Vector<double> &symbolicUbPerBranch,
+                                               const Vector<double> &symbolicLowerBiasPerBranch,
+                                               const Vector<double> &symbolicUpperBiasPerBranch,
+                                               unsigned branchCount ) const
+{
+    ASSERT( symbolicLbPerBranch.size() == branchCount );
+    ASSERT( symbolicUbPerBranch.size() == branchCount );
+    ASSERT( symbolicLowerBiasPerBranch.size() == branchCount );
+    ASSERT( symbolicUpperBiasPerBranch.size() == branchCount );
+    ASSERT( values.size() == branchCount + 1 );
+
+    double score = 0;
+    for ( unsigned i = 0; i < branchCount; ++i )
+    {
+        // Given branch #i symbolic bounds x_f >= a_l x_b + b_l and x_f <= a_u x_b + b_u, with
+        // x_b in [l_i, u_i], calculate the integral of ( a_u x_b + b_u ) - ( a_l x_b + b_l )
+        // over [l_i, u_i].
+        score += symbolicUbPerBranch[i] *
+                 ( std::pow( values[i + 1], 2 ) - std::pow( values[i], 2 ) ) / 2;
+        score += symbolicUpperBiasPerBranch[i] * ( values[i + 1] - values[i] );
+        score -= symbolicLbPerBranch[i] *
+                 ( std::pow( values[i + 1], 2 ) - std::pow( values[i], 2 ) ) / 2;
+        score -= symbolicLowerBiasPerBranch[i] * ( values[i + 1] - values[i] );
+    }
+    return score;
+}
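(Reference derivation, not part of the patch: the per-branch loop above accumulates, in closed
form, the area between the branch's upper and lower planes. The symbols a_l, a_u, b_l, b_u,
l_i, u_i are those of the comment, not code identifiers.)

    \[
    \int_{l_i}^{u_i} \left[ ( a_u x_b + b_u ) - ( a_l x_b + b_l ) \right] dx_b
    \;=\; \frac{a_u - a_l}{2}\left( u_i^2 - l_i^2 \right) + ( b_u - b_l )( u_i - l_i ),
    \]

so the four score terms per branch are exactly the two quadratic (slope) and two linear (bias)
pieces of this expression, and a smaller score means a tighter per-branch relaxation.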
+
+const Map<unsigned, Vector<double>>
+NetworkLevelReasoner::getParametersForLayers( const Vector<double> &coeffs ) const
+{
+    ASSERT( coeffs.size() == getNumberOfParameters() );
+    unsigned index = 0;
+    Map<unsigned, Vector<double>> layerIndicesToParameters;
     for ( const auto &pair : _layerIndexToLayer )
     {
         unsigned layerIndex = pair.first;
         Layer *layer = pair.second;
-        unsigned size = layer->getSize();
-        Layer::Type layerType = layer->getLayerType();
-
-        _outputSymbolicLb[layerIndex] = Vector<double>( outputLayerSize * size, 0 );
-        _outputSymbolicUb[layerIndex] = Vector<double>( outputLayerSize * size, 0 );
-        _outputSymbolicLowerBias[layerIndex] = Vector<double>( outputLayerSize, 0 );
-        _outputSymbolicUpperBias[layerIndex] = Vector<double>( outputLayerSize, 0 );
-
-        if ( layerType != Layer::WEIGHTED_SUM && layerType != Layer::INPUT )
+        unsigned coeffsCount = getNumberOfParametersPerType( layer->getLayerType() );
+        Vector<double> currentCoeffs( coeffsCount );
+        for ( unsigned i = 0; i < coeffsCount; ++i )
         {
-            _predecessorSymbolicLb[layerIndex] = Vector<double>( size * maxSize, 0 );
-            _predecessorSymbolicUb[layerIndex] = Vector<double>( size * maxSize, 0 );
-            _predecessorSymbolicLowerBias[layerIndex] = Vector<double>( size, 0 );
-            _predecessorSymbolicUpperBias[layerIndex] = Vector<double>( size, 0 );
+            currentCoeffs[i] = coeffs[index + i];
         }
+        layerIndicesToParameters.insert( layerIndex, currentCoeffs );
+        index += coeffsCount;
     }
+    const Map<unsigned, Vector<double>> parametersForLayers( layerIndicesToParameters );
+    return parametersForLayers;
+}
-    // Populate symbolic bounds maps via DeepPoly.
-    bool useParameterisedSBT = coeffs.size() > 0;
-    Map<unsigned, Vector<double>> layerIndicesToParameters = Map<unsigned, Vector<double>>( {} );
-    if ( useParameterisedSBT )
+unsigned NetworkLevelReasoner::getNumberOfParameters() const
+{
+    unsigned num = 0;
+    for ( const auto &pair : _layerIndexToLayer )
     {
-        layerIndicesToParameters = getParametersForLayers( coeffs );
+        Layer *layer = pair.second;
+        num += getNumberOfParametersPerType( layer->getLayerType() );
     }
+    return num;
+}
-    _deepPolyAnalysis =
-        std::unique_ptr<DeepPolyAnalysis>( new DeepPolyAnalysis( this,
-                                                                 true,
-                                                                 true,
-                                                                 useParameterisedSBT,
-                                                                 &layerIndicesToParameters,
-                                                                 &_outputSymbolicLb,
-                                                                 &_outputSymbolicUb,
-                                                                 &_outputSymbolicLowerBias,
-                                                                 &_outputSymbolicUpperBias,
-                                                                 &_predecessorSymbolicLb,
-                                                                 &_predecessorSymbolicUb,
-                                                                 &_predecessorSymbolicLowerBias,
-                                                                 &_predecessorSymbolicUpperBias ) );
-    _deepPolyAnalysis->run();
-    _deepPolyAnalysis = nullptr;
+unsigned NetworkLevelReasoner::getNumberOfParametersPerType( Layer::Type t ) const
+{
+    if ( t == Layer::RELU || t == Layer::LEAKY_RELU )
+        return 1;
+
+    if ( t == Layer::SIGN || t == Layer::BILINEAR )
+        return 2;
+
+    return 0;
+}
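(Illustrative sketch, not part of the patch: how the flat parameter vector consumed by
getParametersForLayers() is split per layer, using std::map/std::vector stand-ins for
Marabou's Map/Vector. The layer line-up and coefficient values are hypothetical; the
per-type counts follow getNumberOfParametersPerType() above.)

    #include <iostream>
    #include <map>
    #include <vector>

    int main()
    {
        // Suppose layers 0..3 are INPUT, RELU, SIGN, WEIGHTED_SUM:
        // per getNumberOfParametersPerType(), they take 0, 1, 2, 0 parameters.
        std::vector<unsigned> paramsPerLayer = { 0, 1, 2, 0 };
        std::vector<double> coeffs = { 0.3, 0.7, 0.5 }; // size == 0 + 1 + 2 + 0

        // Same walk as getParametersForLayers(): consume coeffsCount entries per layer.
        std::map<unsigned, std::vector<double>> layerToParams;
        unsigned index = 0;
        for ( unsigned layer = 0; layer < paramsPerLayer.size(); ++layer )
        {
            std::vector<double> current( coeffs.begin() + index,
                                         coeffs.begin() + index + paramsPerLayer[layer] );
            layerToParams[layer] = current;
            index += paramsPerLayer[layer];
        }

        // Expected split: layer 1 -> { 0.3 }, layer 2 -> { 0.7, 0.5 }.
        for ( const auto &pair : layerToParams )
            std::cout << "layer " << pair.first << " gets " << pair.second.size()
                      << " parameter(s)\n";
    }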
-    // Remove new weighted sum layer.
-    removeLayerDependency( outputLayerIndex, newLayerIndex );
-    _layerIndexToLayer.erase( newLayerIndex );
-    if ( newLayer )
+bool NetworkLevelReasoner::supportsInvpropBranching( Layer::Type type ) const
+{
+    // Without the BBPS heuristic, only Relu/Sign/Abs/Leaky Relu activations are branched
+    // before INVPROP.
+    if ( Options::get()->getMILPSolverBoundTighteningType() ==
+             MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM ||
+         Options::get()->getMILPSolverBoundTighteningType() ==
+             MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT )
+    {
+        return type == Layer::RELU || type == Layer::LEAKY_RELU || type == Layer::SIGN ||
+               type == Layer::ABSOLUTE_VALUE;
+    }
+
+    // When using the BBPS heuristic, every implemented activation can be branched before
+    // INVPROP.
+    else if ( Options::get()->getMILPSolverBoundTighteningType() ==
+              MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS )
     {
-        delete newLayer;
-        newLayer = NULL;
+        return type == Layer::RELU || type == Layer::LEAKY_RELU || type == Layer::SIGN ||
+               type == Layer::ABSOLUTE_VALUE || type == Layer::MAX || type == Layer::ROUND ||
+               type == Layer::SIGMOID || type == Layer::SOFTMAX || type == Layer::BILINEAR;
     }
+    return false;
+}
+
+const Vector<unsigned> NetworkLevelReasoner::getLayersWithNonfixedNeurons() const
+{
+    Vector<unsigned> layerWithNonfixedNeurons = Vector<unsigned>( {} );
+    for ( const auto &pair : _layerIndexToLayer )
+    {
+        if ( !pair.second->getNonfixedNeurons().empty() )
+        {
+            layerWithNonfixedNeurons.append( pair.first );
+        }
+    }
+    const Vector<unsigned> layerList = Vector<unsigned>( layerWithNonfixedNeurons );
+    return layerList;
 }

 void NetworkLevelReasoner::mergeWSLayers( unsigned secondLayerIndex,
diff --git a/src/nlr/NetworkLevelReasoner.h b/src/nlr/NetworkLevelReasoner.h
index 90568b213a..07b01c6a38 100644
--- a/src/nlr/NetworkLevelReasoner.h
+++ b/src/nlr/NetworkLevelReasoner.h
@@ -2,7 +2,7 @@
 /*! \file NetworkLevelReasoner.h
  ** \verbatim
  ** Top contributors (to current version):
- ** Guy Katz
+ ** Guy Katz, Ido Shmuel
 ** This file is part of the Marabou project.
 ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS
 ** in the top-level source directory) and their institutional affiliations.
@@ -11,7 +11,7 @@
 **
 ** [[ Add lengthier description here ]]

-**/
+ **/

 #ifndef __NetworkLevelReasoner_h__
 #define __NetworkLevelReasoner_h__
@@ -112,6 +112,18 @@ class NetworkLevelReasoner : public LayerOwner
     the parameterised symbolic bounds (or default to regular symbolic
     bounds if parameterised bounds not implemented).

+    - DeepPoly: For every neuron in the network, calculate symbolic
+    bounds in terms of its predecessor neurons, then perform
+    backsubstitution up to the input layer and concretize.
+
+    - Parameterised DeepPoly: For certain activation functions, there
+    is a continuum of valid symbolic bounds. We receive a map of
+    coefficients in range [0, 1] for every layer index, then compute
+    the DeepPoly bounds via backsubstitution and concretization. For
+    each layer, a symbolic bound in terms of its highest predecessor
+    layers is stored in the predecessorSymbolic maps. Symbolic bounds
+    for the last layer in terms of every layer are stored in the
+    outputSymbolic maps.
+
     - LP Relaxation: invoking an LP solver on a series of LP
     relaxations of the problem we're trying to solve, and optimizing
     the lower and upper bounds of each of the
@@ -124,11 +136,18 @@ class NetworkLevelReasoner : public LayerOwner
     external user calls in order to collect the tighter bounds
     discovered by the NLR.

-    - receiveTighterPolygonalBound: this is a callback from the layer
-    objects, through which they report tighter polygonal bounds.
+    - receivePolygonalTightening: this is a callback from the layer
+    objects, through which they report polygonal bounds.
+
+    - getPolygonalTightenings: this is the function that an
+    external user calls in order to collect the polygonal bounds
+    discovered by the NLR.
+
+    - receiveInfeasibleBranches: this is a callback from the layer
+    objects, through which they report infeasible branch combinations.

-    - getConstraintPolygonalTightenings: this is the function that an
-    external user calls in order to collect the tighter polygonal bounds
+    - getInfeasibleBranches: this is the function that an
+    external user calls in order to collect the infeasible branch combinations
     discovered by the NLR.
 */
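(Minimal usage sketch, not part of the patch: how an external caller drains the tightenings
reported through the callbacks described above. It assumes a Marabou build environment, and
collectBounds is a hypothetical helper introduced only for illustration.)

    #include "NetworkLevelReasoner.h"
    #include "Tightening.h"

    void collectBounds( NLR::NetworkLevelReasoner &nlr )
    {
        // Run one of the propagation modes declared below, e.g. DeepPoly.
        nlr.deepPolyPropagation();

        // Layers report bounds via receiveTighterBound(); the external
        // user collects and then clears them.
        List<Tightening> tightenings;
        nlr.getConstraintTightenings( tightenings );
        for ( const auto &t : tightenings )
        {
            // t._variable, t._value and t._type (LB/UB) describe one bound.
        }
        nlr.clearConstraintTightenings();
    }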
@@ -141,31 +160,29 @@ class NetworkLevelReasoner : public LayerOwner
     void symbolicBoundPropagation();
     void parameterisedSymbolicBoundPropagation( const Vector<double> &coeffs );
     void deepPolyPropagation();
+    void parameterisedDeepPoly( bool storeSymbolicBounds = false,
+                                const Vector<double> &coeffs = Vector<double>( {} ) );
     void lpRelaxationPropagation();
     void LPTighteningForOneLayer( unsigned targetIndex );
     void MILPPropagation();
     void MILPTighteningForOneLayer( unsigned targetIndex );
     void iterativePropagation();
-    void initializeSymbolicBoundsMaps( const Vector<double> &coeffs = Vector<double>( {} ) );
+    void receiveTighterBound( Tightening tightening );
+    void getConstraintTightenings( List<Tightening> &tightenings );
+    void clearConstraintTightenings();

-    // Return optimizable parameters which minimize parameterised SBT bounds' volume.
-    const Vector<double> OptimalParameterisedSymbolicBoundTightening();
+    void receivePolygonalTightening( PolygonalTightening &polygonalTightening );
+    void getPolygonalTightenings( List<PolygonalTightening> &polygonalTightenings );
+    void clearPolygonalTightenings();

-    // Optimize biases of generated parameterised polygonal tightenings.
-    const Vector<double> OptimizeParameterisedPolygonalTightening();
+    void receiveInfeasibleBranches( Map<NeuronIndex, unsigned> &neuronToBranchIndex );
+    void getInfeasibleBranches( List<Map<NeuronIndex, unsigned>> &infeasibleBranches );
+    void clearInfeasibleBranches();

     // Get total number of optimizable parameters for parameterised SBT relaxation.
     unsigned getNumberOfParameters() const;

-    void receiveTighterBound( Tightening tightening );
-    void getConstraintTightenings( List<Tightening> &tightenings );
-    void clearConstraintTightenings();
-
-    void receivePolygonalTighterBound( PolygonalTightening polygonalTightening );
-    void getConstraintPolygonalTightenings( List<PolygonalTightening> &polygonalTightening );
-    void clearConstraintPolygonalTightenings();
-
     /*
       For debugging purposes: dump the network topology
     */
@@ -210,18 +227,34 @@ class NetworkLevelReasoner : public LayerOwner
     */
     double getPreviousBias( const ReluConstraint *reluConstraint ) const;

-    Vector<double> getOutputSymbolicLb( unsigned layerIndex ) const;
-    Vector<double> getOutputSymbolicUb( unsigned layerIndex ) const;
-    Vector<double> getOutputSymbolicLowerBias( unsigned layerIndex ) const;
-    Vector<double> getOutputSymbolicUpperBias( unsigned layerIndex ) const;
+    // Get symbolic bounds for the last layer in terms of the given layer.
+    Vector<double> getOutputSymbolicLb( unsigned layerIndex );
+    Vector<double> getOutputSymbolicUb( unsigned layerIndex );
+    Vector<double> getOutputSymbolicLowerBias( unsigned layerIndex );
+    Vector<double> getOutputSymbolicUpperBias( unsigned layerIndex );
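(Orientation note, not part of the patch: the output-symbolic getters above describe the last
layer linearly in terms of a given layer k. The shapes follow the sizes used when the maps are
initialized in the .cpp; the flattening order of the coefficient matrices inside Vector<double>
is an assumption here.)

    \[
    A_\ell\, x^{(k)} + b_\ell \;\le\; y \;\le\; A_u\, x^{(k)} + b_u,
    \]

where y is the output layer (size m) and x^{(k)} the given layer (size n_k):
getOutputSymbolicLb/getOutputSymbolicUb return \(A_\ell, A_u\) flattened to length
\(m \cdot n_k\), and getOutputSymbolicLowerBias/getOutputSymbolicUpperBias return
\(b_\ell, b_u\) of length m.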
-    Vector<double> getPredecessorSymbolicLb( unsigned layerIndex ) const;
-    Vector<double> getPredecessorSymbolicUb( unsigned layerIndex ) const;
-    Vector<double> getPredecessorSymbolicLowerBias( unsigned layerIndex ) const;
-    Vector<double> getPredecessorSymbolicUpperBias( unsigned layerIndex ) const;
+    // Get symbolic bounds of the given layer in terms of its predecessor.
+    Vector<double> getPredecessorSymbolicLb( unsigned layerIndex );
+    Vector<double> getPredecessorSymbolicUb( unsigned layerIndex );
+    Vector<double> getPredecessorSymbolicLowerBias( unsigned layerIndex );
+    Vector<double> getPredecessorSymbolicUpperBias( unsigned layerIndex );

-    Map<NeuronIndex, double> getBBPSBranchingPoint( NeuronIndex index ) const;
-    double getPMNRScore( NeuronIndex index ) const;
+    /*
+      Get the symbolic bounds, in terms of the predecessor layer, for each branch of the
+      given neuron.
+    */
+    Vector<double> getSymbolicLbPerBranch( NeuronIndex index );
+    Vector<double> getSymbolicUbPerBranch( NeuronIndex index );
+    Vector<double> getSymbolicLowerBiasPerBranch( NeuronIndex index );
+    Vector<double> getSymbolicUpperBiasPerBranch( NeuronIndex index );
+
+    /*
+      Get the BBPS branching point of the given neuron.
+    */
+    std::pair<double, double> getBBPSBranchingPoint( NeuronIndex index );
+
+    // Get the PMNR neuron selection heuristic score for the given neuron.
+    double getPMNRScore( NeuronIndex index );

     /*
       Finds logically consecutive WS layers and merges them, in order
@@ -253,12 +286,32 @@ class NetworkLevelReasoner : public LayerOwner

     List<Tightening> _boundTightenings;
     List<PolygonalTightening> _polygonalBoundTightenings;

-    std::unique_ptr<DeepPolyAnalysis> _deepPolyAnalysis;
+    List<Map<NeuronIndex, unsigned>> _infeasibleBranches;

-    void freeMemoryIfNeeded();
+    std::unique_ptr<DeepPolyAnalysis> _deepPolyAnalysis;

     List<PiecewiseLinearConstraint *> _constraintsInTopologicalOrder;

+    Map<unsigned, Vector<double>> _predecessorSymbolicLb;
+    Map<unsigned, Vector<double>> _predecessorSymbolicUb;
+    Map<unsigned, Vector<double>> _predecessorSymbolicLowerBias;
+    Map<unsigned, Vector<double>> _predecessorSymbolicUpperBias;
+
+    Map<unsigned, Vector<double>> _outputSymbolicLb;
+    Map<unsigned, Vector<double>> _outputSymbolicUb;
+    Map<unsigned, Vector<double>> _outputSymbolicLowerBias;
+    Map<unsigned, Vector<double>> _outputSymbolicUpperBias;
+
+    Map<NeuronIndex, double> _neuronToPMNRScores;
+
+    Map<NeuronIndex, std::pair<double, double>> _neuronToBBPSBranchingPoints;
+    Map<NeuronIndex, Vector<double>> _neuronToSymbolicLbPerBranch;
+    Map<NeuronIndex, Vector<double>> _neuronToSymbolicUbPerBranch;
+    Map<NeuronIndex, Vector<double>> _neuronToSymbolicLowerBiasPerBranch;
+    Map<NeuronIndex, Vector<double>> _neuronToSymbolicUpperBiasPerBranch;
+
+    void freeMemoryIfNeeded();
+
     // Map each neuron to a linear expression representing its weighted sum
     void generateLinearExpressionForWeightedSumLayer(
         Map<unsigned, LinearExpression> &variableToExpression,
@@ -287,39 +340,14 @@ class NetworkLevelReasoner : public LayerOwner
         unsigned outputDimension );

     void reduceLayerIndex( unsigned layer, unsigned startIndex );

-    void optimizeBoundsWithPreimageApproximation( LPFormulator &lpFormulator );
-
-    // Estimate Volume of parameterised symbolic bound tightening.
-    double EstimateVolume( const Vector<double> &coeffs );
-
-    void optimizeBoundsWithPMNR( LPFormulator &lpFormulator );
+    // Return optimizable parameters which minimize parameterised SBT bounds' volume.
+    const Vector<double> OptimalParameterisedSymbolicBoundTightening();

     // Optimize biases of generated parameterised polygonal tightenings.
-    double
-    OptimizeSingleParameterisedPolygonalTightening( PolygonalTightening &tightening,
-                                                    Vector<PolygonalTightening> &prevTightenings );
-
-    // Get current lower bound for selected parameterised polygonal tightenings' biases.
-    double
-    getParameterisdPolygonalTighteningLowerBound( const Vector<double> &coeffs,
-                                                  const Vector<double> &gamma,
-                                                  PolygonalTightening &tightening,
-                                                  Vector<PolygonalTightening> &prevTightenings );
-
-    const Vector<PolygonalTightening> generatePolygonalTightenings();
-    const Vector<PolygonalTightening> generatePolygonalTighteningsForPMNR();
-    const Vector<PolygonalTightening> generatePolygonalTighteningsForInvprop();
-
-    // Heuristically select neurons and polygonal tightenings for PMNR.
-    const Vector<PolygonalTightening> selectConstraints();
-
-
-    double getPMNRGradientScore( NeuronIndex index ) const;
-    double getPMNRBBPSScore( NeuronIndex index ) const;
+    const Vector<double> OptimizeParameterisedPolygonalTightening();

-    const Vector<PolygonalTightening> selectConstraintsForPMNRRandom();
-    const Vector<PolygonalTightening> selectConstraintsForPMNRGradient();
-    const Vector<PolygonalTightening> selectConstraintsForPMNRBBPS();
+    // Estimate the volume of the parameterised symbolic bound tightening.
+    double EstimateVolume( const Vector<double> &coeffs );

     // Return difference between given point and upper and lower bounds determined by
     // parameterised SBT relaxation.
@@ -327,20 +355,37 @@ class NetworkLevelReasoner : public LayerOwner
         Map<unsigned, double> &point,
         unsigned i ) const;

-    // Get map containing vector of optimizable parameters for parameterised SBT relaxation for
-    // every layer index.
-    const Map<unsigned, Vector<double>>
-    getParametersForLayers( const Vector<double> &coeffs ) const;
-
-    // Get number of optimizable parameters for parameterised SBT relaxation per layer type.
-    unsigned getNumberOfParametersPerType( Layer::Type t ) const;
+    // Heuristically generate optimizable polygonal tightenings for INVPROP or PMNR.
+    const Vector<PolygonalTightening> generatePolygonalTightenings();
+    const Vector<PolygonalTightening> generatePolygonalTighteningsForPMNR();
+    const Vector<PolygonalTightening> generatePolygonalTighteningsForInvprop();

-    // Get all indices of active non-weighted sum layers for INVPROP.
-    const Vector<unsigned> getLayersWithNonFixedNeurons() const;
+    // Heuristically select neurons for PMNR.
+    const Vector<NeuronIndex> selectPMNRNeurons();
+    const Vector<NeuronIndex> selectPMNRNeuronsRandomly();
+    const Vector<NeuronIndex> selectPMNRNeuronsHeuristically();

-    const Vector<unsigned> getNonFixedNeurons( const Layer *layer ) const;
+    // Optimize biases of generated parameterised polygonal tightenings.
+    double OptimizeSingleParameterisedPolygonalTightening(
+        PolygonalTightening &tightening,
+        Vector<PolygonalTightening> &prevTightenings,
+        bool maximize,
+        double feasibilityBound,
+        const Map<NeuronIndex, unsigned> &neuronToBranchIndex = Map<NeuronIndex, unsigned>( {} ) );
+
+    double OptimizeSingleParameterisedPolygonalTighteningWithBranching(
+        PolygonalTightening &tightening,
+        Vector<PolygonalTightening> &prevTightenings,
+        bool maximize,
+        double bound );

-    bool isNeuronNonFixed( NeuronIndex index ) const;
+    // Get current lower bound for selected parameterised polygonal tightenings' biases.
+    double getParameterisdPolygonalTighteningBound(
+        const Vector<double> &coeffs,
+        const Vector<double> &gamma,
+        PolygonalTightening &tightening,
+        Vector<PolygonalTightening> &prevTightenings,
+        const Map<NeuronIndex, unsigned> &neuronToBranchIndex = Map<NeuronIndex, unsigned>( {} ) );

     /*
       Store previous biases for each ReLU neuron in a map for getPreviousBias()
@@ -349,13 +394,147 @@ class NetworkLevelReasoner : public LayerOwner
     Map<const ReluConstraint *, double> _previousBiases;
     void initializePreviousBiasMap();

+    // Calculate the PMNR score for every non-fixed neuron.
     void initializePMNRScoreMap();
-    void initializeBBPSBranchingMap();
+    double calculateNeuronPMNRScore( NeuronIndex index );
+    double calculatePMNRGradientScore( NeuronIndex index );
+    double calculatePMNRBBPSScore( NeuronIndex index );
+
+    // Initialize PMNR-BBPS branching point scores and per-branch predecessor symbolic bounds for
+    // every non-fixed neuron.
+    void initializeBBPSBranchingMaps();
+    const std::pair<double, double> calculateBranchingPoint( NeuronIndex index ) const;
+
+    // Heuristically generate candidates for branching points.
+    const Vector<std::pair<double, double>>
+    generateBranchingPointCandidates( const Layer *layer, unsigned i ) const;
+
+    // Helper functions for generating branching points.
+ const Vector> + generateBranchingPointCandidatesAtZero( const Layer *layer, unsigned i ) const; + const Vector> + generateBranchingPointCandidatesForRound( const Layer *layer, unsigned i ) const; + const Vector> + generateBranchingPointCandidatesForSigmoid( const Layer *layer, unsigned i ) const; + const Vector> + generateBranchingPointCandidatesForMax( const Layer *layer, unsigned i ) const; + const Vector> + generateBranchingPointCandidatesForSoftmax( const Layer *layer, unsigned i ) const; + const Vector> + generateBranchingPointCandidatesForBilinear( const Layer *layer, unsigned i ) const; + + // Given neuron index, source index and branch ranges, compute symbolic bounds per branch. + // If activation has multiple sources, sources other than given neuron are concretized. + void calculateSymbolicBoundsPerBranch( NeuronIndex index, + NeuronIndex sourceIndex, + const Vector &values, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch, + unsigned branchCount ) const; + + // Helper functions for calculating branch symbolic bounds. + void + calculateSymbolicBoundsPerBranchForRelu( unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const; + void calculateSymbolicBoundsPerBranchForAbsoluteValue( + unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const; + void + calculateSymbolicBoundsPerBranchForSign( unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const; + void + calculateSymbolicBoundsPerBranchForRound( unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const; + void + calculateSymbolicBoundsPerBranchForSigmoid( unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const; + void calculateSymbolicBoundsPerBranchForLeakyRelu( + NeuronIndex index, + unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const; + void calculateSymbolicBoundsPerBranchForMax( NeuronIndex index, + NeuronIndex chosenSourceIndex, + unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const; + void + calculateSymbolicBoundsPerBranchForSoftmax( NeuronIndex index, + NeuronIndex chosenSourceIndex, + unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const; + void + calculateSymbolicBoundsPerBranchForBilinear( NeuronIndex index, + NeuronIndex chosenSourceIndex, + unsigned i, + double sourceLb, + double sourceUb, + Vector &symbolicLbPerBranch, + Vector &symbolicUbPerBranch, + Vector &symbolicLowerBiasPerBranch, + Vector &symbolicUpperBiasPerBranch ) const; + 
+ // Calculate tightening loss of branch symbolic bounds. + double calculateTighteningLoss( const Vector &values, + const Vector &symbolicLbPerBranch, + const Vector &symbolicUbPerBranch, + const Vector &symbolicLowerBiasPerBranch, + const Vector &symbolicUpperBiasPerBranch, + unsigned branchCount ) const; + + // Get map containing vector of optimizable parameters for parameterised SBT relaxation for + // every layer index. + const Map> + getParametersForLayers( const Vector &coeffs ) const; + + // Get number of optimizable parameters for parameterised SBT relaxation per layer type. + unsigned getNumberOfParametersPerType( Layer::Type t ) const; + + // Determine whether activation type and PMNR strategy support branching before INVPROP. + bool supportsInvpropBranching( Layer::Type type ) const; - double calculateNeuronPMNRScore( NeuronIndex index ) const; - double calculatePMNRGradientScore( NeuronIndex index ) const; - double calculatePMNRBBPSScore( NeuronIndex index ) const; - Map calculateBranchingPoint( NeuronIndex index ) const; + // Get all indices of layers with non-fixed neurona. + const Vector getLayersWithNonfixedNeurons() const; /* If the NLR is manipulated manually in order to generate a new @@ -363,19 +542,6 @@ class NetworkLevelReasoner : public LayerOwner to all neurons in the network */ void reindexNeurons(); - - Map> _neuronToBBPSBranchingPoints; - Map _neuronToPMNRScores; - - Map> _outputSymbolicLb; - Map> _outputSymbolicUb; - Map> _outputSymbolicLowerBias; - Map> _outputSymbolicUpperBias; - - Map> _predecessorSymbolicLb; - Map> _predecessorSymbolicUb; - Map> _predecessorSymbolicLowerBias; - Map> _predecessorSymbolicUpperBias; }; } // namespace NLR diff --git a/src/nlr/tests/Test_DeepPolyAnalysis.h b/src/nlr/tests/Test_DeepPolyAnalysis.h index 77112d48e2..150bccb9c0 100644 --- a/src/nlr/tests/Test_DeepPolyAnalysis.h +++ b/src/nlr/tests/Test_DeepPolyAnalysis.h @@ -290,7 +290,7 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); /* Input ranges: @@ -823,6 +823,8 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite // Mark the Sigmoid sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 1 ); + + // Mark the Round sources nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 1 ); @@ -951,7 +953,6 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite { /* - x0 x3 S x6 x1 x4 S x7 @@ -1000,6 +1001,7 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite nlr.setBias( 1, 1, 2 ); nlr.setBias( 1, 2, 3 ); + // Mark the Softmax sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 0 ); nlr.addActivationSource( 1, 2, 2, 0 ); @@ -1010,7 +1012,6 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite nlr.addActivationSource( 1, 1, 2, 2 ); nlr.addActivationSource( 1, 2, 2, 2 ); - // Variable indexing nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); @@ -1049,7 +1050,6 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 10, large ); } - void test_deeppoly_softmax1() { NLR::NetworkLevelReasoner nlr; @@ -1192,12 +1192,10 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite return false; } - void populateNetworkSoftmax2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* - 
x0 x3 S x8 x1 x4 S x9 @@ -1268,6 +1266,7 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite nlr.setBias( 1, 3, 2 ); nlr.setBias( 1, 4, 1 ); + // Mark the Softmax sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 2, 2, 0 ); nlr.addActivationSource( 1, 4, 2, 0 ); @@ -1398,7 +1397,6 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite value = NLR::Layer::dERLowerBound( input, inputLb, inputUb, 0, 1 ); TS_ASSERT( FloatUtils::areEqual( value, -0.000838421, 0.00001 ) ); - Vector outputLb = { 0.2, 0, 0 }; Vector outputUb = { 0.4, 0.1, 0.1 }; @@ -1415,6 +1413,7 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite Vector inputLb = { -1, 0, 1 }; Vector inputUb = { 0, 2, 3 }; Vector input = { -0.5, 1, 2 }; + double value = NLR::Layer::LSELowerBound( input, inputLb, inputUb, 0 ); TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) ); value = NLR::Layer::dLSELowerBound( input, inputLb, inputUb, 0, 0 ); @@ -1424,6 +1423,7 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite Vector outputLb = { 0.2, 0, 0 }; Vector outputUb = { 0.4, 0.1, 0.1 }; + value = NLR::Layer::LSEUpperBound( input, outputLb, outputUb, 0 ); TS_ASSERT( FloatUtils::areEqual( value, -0.164165, 0.00001 ) ); value = NLR::Layer::dLSEUpperbound( input, outputLb, outputUb, 0, 0 ); @@ -1436,7 +1436,6 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite { /* - x0 x2 x x4 -- x5 x1 x3 @@ -1465,10 +1464,10 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite nlr.setWeight( 0, 1, 1, 1, 1 ); nlr.setWeight( 2, 0, 3, 0, -1 ); + // Mark the Bilinear sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 0 ); - // Variable indexing nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); @@ -1588,7 +1587,7 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite nlr.setBias( 5, 0, 1 ); - // Mark the ReLU sources + // Mark the LeakyReLU sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 1 ); @@ -1792,16 +1791,5802 @@ class DeepPolyAnalysisTestSuite : public CxxTest::TestSuite } } - bool existsBounds( const List &bounds, Tightening bound ) + void populateNetworkDeepPolyRelu( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - for ( const auto &b : bounds ) - { - if ( b._type == bound._type && b._variable == bound._variable ) - { - if ( FloatUtils::areEqual( b._value, bound._value ) ) - return true; - } - } - return false; + /* + 2 R 1 + x0 --- x2 ---> x4 --- x6 + \ / / + 1 \ / / + \/ -1 / + /\ / + 3 / \ / + / \ R / + x1 --- x3 ---> x5 + 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + 
nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 7 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + } + + void populateNetworkDeepPolyReluResidual1( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) + { + /* + -1 + __________________ + / \ + / 1 R -1 1 R 3 1 + x0 --- x1 ---> x2 --- x3 ---> x4 --- x5 + \ / + \ 3 / + \________________________/ + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 1 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 2, NLR::Layer::RELU, 1 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 4, NLR::Layer::RELU, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + nlr.addLayerDependency( 0, 3 ); + nlr.addLayerDependency( 1, 5 ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 4, 0, 5, 0, 3 ); + nlr.setWeight( 0, 0, 3, 0, -1 ); + nlr.setWeight( 1, 0, 5, 0, 3 ); + + nlr.setBias( 3, 0, 1 ); + nlr.setBias( 5, 0, 1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 3, 0, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 5 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 6 ); + tableau.setLowerBound( 1, -large ); + tableau.setUpperBound( 1, large ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + } + + void populateNetworkDeepPolyReluResidual2( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) + { + /* + -1 + __________________ + / \ + / 1 R -1 1 R 3 1 1 + x0 --- x1 ---> x2 --- x3 ---> x4 --- x5 --- x6 + \ / + \ 1 / + \_______________________________/ + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 1 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 2, NLR::Layer::RELU, 1 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 4, NLR::Layer::RELU, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 6, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 6; ++i ) + nlr.addLayerDependency( i - 1, i ); + nlr.addLayerDependency( 0, 3 ); + nlr.addLayerDependency( 0, 5 ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 
0, 1 ); + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 4, 0, 5, 0, 3 ); + nlr.setWeight( 0, 0, 3, 0, -1 ); + nlr.setWeight( 0, 0, 5, 0, 1 ); + nlr.setWeight( 5, 0, 6, 0, 1 ); + + nlr.setBias( 3, 0, 1 ); + nlr.setBias( 5, 0, 1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 3, 0, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 7 ); + tableau.setLowerBound( 1, -large ); + tableau.setUpperBound( 1, large ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + } + + void populateNetworkDeepPolyReluReindex( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 1 1 1 + x0 --- x2 x5 --- x6 x9 --- x10 + \ /\ /\ / \ / \ / + 1 \ / R\ /-1\ / R \ / 1 \ / + \/ \/ \/ \/ \/ + /\ /\ /\ /\ /\ + 1 / \ R/ \ 1/ \ R / \ 1 / \ + / \/ \/ \ / \ / 0 \ + x1 --- x3 x4 --- x7 x8 --- x11 + -1 1 + + The example described in Fig. 3 of + https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 0 ); + + nlr.setBias( 5, 0, 1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + + nlr.addActivationSource( 3, 0, 4, 1 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 8 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + 
nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkDeepPolyLeakyReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 LR 1 LR 1 1 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 1 \ / 0 \ / + \/ \/ \/ + /\ /\ /\ + 1 / \ 1 / \ 1 / \ + / \ LR / \ LR / 1 \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + -1 -1 + + The example described in Fig. 3 of + https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + using LeakyReLU activation instead of ReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + nlr.getLayer( 2 )->setAlpha( 0.2 ); + nlr.getLayer( 4 )->setAlpha( 0.2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, 1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 0 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + + nlr.setBias( 5, 0, 1 ); + + // Mark the LeakyReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); 
+ tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkDeepPolySigmoidsAndRound( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) + { + /* + + 1 S 1 Rd + x0 --- x2 ---> x4 --- x6 --- x8 + \ / \ / + 1 \ / 1 \ / + \/ \/ + /\ /\ + 1 / \ 1 / \ + / \ S / \ Rd + x1 --- x3 ---> x5 --- x7 --- x9 + -1 -1 + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 4; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, 1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + // Mark the Sigmoid sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Mark the Round sources + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 10 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + } + + void populateNetworkDeepPolyMax( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 R Max 2 + x0 --- x2 ---> x4 --- x6 ---> x7 + \ / / + 1 \ / / + \/ / + /\ / + 1 / \ / + / \ R / + x1 --- x3 ---> x5 + -1 + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, 
NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::MAX, 1 ); + nlr.addLayer( 4, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 4; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); + nlr.setWeight( 3, 0, 4, 0, 2 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Mark the Max sources + nlr.addActivationSource( 2, 0, 3, 0 ); + nlr.addActivationSource( 2, 1, 3, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 7 ); + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 8 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + } + + void populateNetworkDeepPolySoftmax( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + x0 x3 S x6 + + x1 x4 S x7 + + x2 x5 S x8 + + x3 = x0 - x1 + x2 + 1 + x4 = -x0 + x1 + x2 + 2 + x5 = -x0 - x1 - x2 + 3 + + x6 x7 x8 = softmax(x3, x4, x5) + + x9 = x6 + x7 + x8 + x10 = - x6 - x7 - x8 + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, -1 ); + nlr.setWeight( 0, 0, 1, 2, -1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 2, -1 ); + nlr.setWeight( 0, 2, 1, 0, 1 ); + nlr.setWeight( 0, 2, 1, 1, 1 ); + nlr.setWeight( 0, 2, 1, 2, -1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 1, 1, 2 ); + nlr.setBias( 1, 2, 3 ); + + // Mark the Softmax sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); 
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 8 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 11 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + } + + void populateNetworkDeepPolySoftmax2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + x0 x3 S x8 + + x1 x4 S x9 + + x2 x5 S x10 + + x6 S x11 + + x7 S x12 + + x3 = x0 - x1 + x2 + 1 + x4 = -x0 + x1 + x2 + 2 + x5 = -x0 - x1 - x2 + 3 + x6 = -x0 - x1 - x2 + 2 + x7 = -x0 - x1 - x2 + 1 + + x8 x10 x12 = softmax(x3, x5, x7) + + x9 x11 = softmax(x4, x6) + + x13 = x8 + x10 + x12 + x14 = -x8 - x10 - x12 + x15 = x9 + x11 + x16 = -x9 - x11 + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 5 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 5 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 4 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, -1 ); + nlr.setWeight( 0, 0, 1, 2, -1 ); + nlr.setWeight( 0, 0, 1, 3, -1 ); + nlr.setWeight( 0, 0, 1, 4, -1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 2, -1 ); + nlr.setWeight( 0, 1, 1, 3, -1 ); + nlr.setWeight( 0, 1, 1, 4, -1 ); + nlr.setWeight( 0, 2, 1, 0, 1 ); + nlr.setWeight( 0, 2, 1, 1, 1 ); + nlr.setWeight( 0, 2, 1, 2, -1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + nlr.setWeight( 0, 2, 1, 4, -1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 4, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + nlr.setWeight( 2, 4, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 2, 1 ); + nlr.setWeight( 2, 3, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 3, -1 ); + nlr.setWeight( 2, 3, 3, 3, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 1, 1, 2 ); + nlr.setBias( 1, 2, 3 ); + nlr.setBias( 1, 3, 2 ); + nlr.setBias( 1, 4, 1 ); + + // Mark the Softmax sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 4, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 4, 2, 2 ); + nlr.addActivationSource( 1, 0, 2, 4 ); + nlr.addActivationSource( 1, 2, 2, 4 ); + nlr.addActivationSource( 1, 4, 2, 4 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 
3, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 3 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 4 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 4 ), 12 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 13 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 3 ), 16 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 17 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + } + + void populateNetworkDeepPolyBilinear( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + x0 x2 + x x4 -- x5 + x1 x3 + + x2 = x0 - 2 * x1 + x3 = x0 + x1 + x4 = -x5 + + x4 = x2 * x3 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::BILINEAR, 1 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -2 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, -1 ); + + // Mark the Bilinear sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 5 ); + + // Very loose bounds for 
neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 6 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + } + + void test_deeppoly_relus_all_active2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolyRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = 2x0 + 3x1 + x2.lb = 2x0 + 3x1 : [11, 27] + x2.ub = 2x0 + 3x1 : [11, 27] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + Both ReLUs active, bound survive through activations: + + x2 <= x4 <= x2 + x4.lb = 2x0 + 3x1 : [11, 27] + x4.ub = 2x0 + 3x1 : [11, 27] + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + => x2 - x3 <= x6 <= x2 - x3 + x6.lb = x0 + 2x1 : [6, 16] + x6.ub = x0 + 2x1 : [6, 16] + */ + + List expectedBounds( { + Tightening( 2, 11, Tightening::LB ), + Tightening( 2, 27, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 11, Tightening::LB ), + Tightening( 4, 27, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, 6, Tightening::LB ), + Tightening( 6, 16, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_deeppoly_relus_active_and_inactive2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolyRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = 2x0 + 3x1 - 30 + x2.lb = 2x0 + 3x1 - 30 : [-19, -3] + x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is inactive, bounds get zeroed + Second ReLU is active, bounds surive the activation + + 0 <= x4 <= 0 + x4.lb = 0 + x4.ub = 0 + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + ==> -x3 <= x6 <= -x3 + x6.lb = -x0 - x1 : [-11, -5] + x6.ub = -x0 - x1 : [-11, -5] + */ + + List expectedBounds( { + Tightening( 2, -19, Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 
5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_deeppoly_relus_active_and_not_fixed2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolyRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = 2x0 + 3x1 - 15 + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is undecided, bound is concretized. 12 = ub > -lb = 4, using ReLU lower + coefficient of 1. Upper coefficient: 12/(12--4) = 12/16 = 0.75 + Second ReLU is active, bounds surive the activation + + x4 range: [-4, 12] + x2 <= x4 <= 0.75 x2 + 3 + x4.lb = 2x0 + 3x1 - 15 + x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25 + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + ==> x2 - x3 <= x6 <= 0.75x2 - x3 + 3 + x6.lb = x0 + 2x1 - 15 + x6.ub = 0.5x0 + 1.25x1 - 8.25 + + x6 range: [4 + 2 - 15 = -9, 3 + 6.25 - 8.25 = 1] = [-9, 1] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, -4, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -9, Tightening::LB ), + Tightening( 6, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_deeppoly_relus_active_and_externally_fixed2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolyRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
+ nlr.setBias( 1, 0, -15 ); + + // However, one of the ReLU's variables has been eliminated + nlr.eliminateVariable( 2, -3 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = -3 + x2 is eliminated, everything set to -3 + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is inactive (set externally), bounds get zeroed + Second ReLU is active, bounds surive the activation + + 0 <= x4 <= 0 + x4.lb = 0 + x4.ub = 0 + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + ==> -x3 <= x6 <= -x3 + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 : [-11, -5] + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_deeppoly_relu_residual3() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolyReluResidual1( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) ); + + /* + Input ranges: + + x0: [-1, 1] + + Layers 1. 2: + + x1 = x0 + x1.lb = x0 : [-1, 1] + x1.ub = x0 : [-1, 1] + + ReLU is undecided, bound is concretized. 1 = ub <= -lb = 1, using ReLU lower + coefficient of 0. Upper coefficient: 1/( 1--1 ) = 1/2 = 0.5 + + 0 <= x2 <= 0.5x1 + 0.5 + x2.lb = 0 + x2.ub = 0.5x0 + 0.5 + x2 range: [0, 1] + + Layers 3, 4 (with residual from x0): + + x3 = - x2 - x0 + 1 + x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 2] + x3.ub = -1( 0 ) -1x0 + 1 = -x0 + 1 : [0, 2] + x3 range: [-1, 2] + + ReLU is undecided, bound is concretized. 2 = ub > -lb = 1, using ReLU lower + coefficient of 1. Upper coefficient: 2/( 2--1 ) = 2/3. 
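+
+          ( General rule, added here as a reading aid: for an undecided ReLU
+            with pre-activation bounds [l, u], l < 0 < u, DeepPoly's upper line
+            is y <= u/( u - l ) x - u*l/( u - l ), and the lower line is y >= x
+            when u > -l and y >= 0 otherwise. Here u/( u - l ) = 2/3 and
+            -u*l/( u - l ) = 2/3, giving the relaxation below. )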
+ + x3 <= x4 <= 2/3 x3 + 2/3 + x4.lb = -1.5x0 + 0.5 + x4.ub = 2/3 ( -x0 + 1 ) + 2/3 = -2/3 x0 + 4/3 : [1, 2] + x4 range: [-1, 2] + + Layer 5 (with residual from x1): + + x5 = 3x4 + 3x1 + 1 + x5.lb = 3 ( -1.5x0 + 0.5 ) + 3 ( x0 ) + 1 = -1.5x0 + 2.5 : [1, 4] + x5.ub = 3 ( -2/3 x0 + 4/3 ) + 3 ( x0 ) + 1 = x0 + 5 : [4, 6] + x5 range: [1, 6] + */ + + List expectedBounds( { + Tightening( 1, -1, Tightening::LB ), + Tightening( 1, 1, Tightening::UB ), + Tightening( 2, 0, Tightening::LB ), + Tightening( 2, 1, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), + Tightening( 5, 6, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_deeppoly_relu_residual4() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolyReluResidual2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) ); + + /* + Input ranges: + + x0: [-1, 1] + + Layers 1, 2: + + x1 = x0 + x1.lb = x0 : [-1, 1] + x1.ub = x0 : [-1, 1] + + ReLU is undecided, bound is concretized. 1 = ub <= -lb = 1, using ReLU lower + coefficient of 0. Upper coefficient: 1/( 1--1 ) = 1/2 = 0.5 + + 0.5 x1 <= x2 <= 0.5x1 + 0.5 + x2.lb = 0 + x2.ub = 0.5x0 + 0.5 + x2 range: [0, 1] + + Layers 3, 4 (with residual from x0): + + x3 = - x2 - x0 + 1 + x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 2] + x3.ub = -1( 0 ) -1x0 + 1 = -x0 + 1 : [0, 2] + x3 range: [-1, 2] + + ReLU is undecided, bound is concretized. 2 = ub > -lb = 1, using ReLU lower + coefficient of 1. Upper coefficient: 2/( 2--1 ) = 2/3. 
+ + x3 <= x4 <= 2/3 x3 + 2/3 + x4.lb = -1.5x0 + 0.5 + x4.ub = 2/3 ( -x0 + 1 ) + 2/3 = -2/3 x0 + 4/3 : [1, 2] + x4 range: [-1, 2] + + Layer 5 (with residual from x0): + + x5 = 3x4 + x0 + 1 + x5.lb = 3 ( -1.5x0 + 0.5 ) + 1 ( x0 ) + 1 = -3.5x0 + 2.5 : [-1, 6] + x5.ub = 3 ( -2/3 x0 + 4/3 ) + 1 ( x0 ) + 1 = -x0 + 5 : [4, 6] + x5 range: [-1, 6] + + Layer 6: + x6 = x5 + x6.lb = -3.5x0 + 2.5 : [-1, 6] + x6.ub = -x0 + 5 : [4, 6] + x6 range: [-1, 6] + */ + + List expectedBounds( { + Tightening( 1, -1, Tightening::LB ), + Tightening( 1, 1, Tightening::UB ), + Tightening( 2, 0, Tightening::LB ), + Tightening( 2, 1, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), + Tightening( 5, 6, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), + Tightening( 6, 6, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_deeppoly_relu_reindex2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolyReluReindex( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) ); + + /* + Input ranges: + + x0: [-1, 1] + x1: [-1, 1] + + Layers 1, 2: + + x2 = x0 + x1 + x2.lb = x0 + x1 : [-2, 2] + x2.ub = x0 + x1 : [-2, 2] + + x3 = x0 - x1 + x3.lb = x0 - x1 : [-2, 2] + x3.ub = x0 - x1 : [-2, 2] + + Both ReLUs are undecided, bounds are concretized. 2 = ub <= -lb = 2, using ReLU lower + coefficient of 0. Upper coefficient: 2/( 2--2 ) = 2/4 = 0.5 + + 0 <= x4 <= 0.5x2 + 1 + x4.lb = 0 + x4.ub = 0.5 ( x0 + x1 ) + 1 = 0.5x0 + 0.5x1 + 1 + x4 range: [0, 2] + + 0 <= x5 <= 0.5x3 + 1 + x5.lb = 0 + x5.ub = 0.5 ( x0 - x1 ) + 1 = 0.5x0 - 0.5x1 + 1 + x5 range: [0, 2] + + Layers 3, 4: + + x6 = x4 + x5 + x6.lb = 1 ( 0 ) + 1 ( 0 ) = 0 : [0, 0] + x6.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) + 1 ( 0.5x0 - 0.5x1 + 1 ) = x0 + 2 : [1, 3] + x6 range: [0, 3] + + x7 = x4 - x5 + x7.lb = 1 ( 0 ) - 1 ( 0.5x0 - 0.5x1 + 1 ) = - 0.5x0 + 0.5x1 - 1 : [-2, 0] + x7.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) - 1 ( 0 ) = 0.5x0 + 0.5x1 + 1 : [0, 2] + x7 range: [-2, 2] + + First ReLU is active, bounds surive the activation + Second ReLUs is undecided, bound is concretized. 2 = ub <= -lb = 2, using ReLU lower + coefficient of 0. 
Upper coefficient (second ReLU): 2/( 2--2 ) = 2/4 = 0.5 + + x6 <= x8 <= x6 + x8.lb = 0 + x8.ub = x0 + 2 + x8 range: [0, 3] + + 0 <= x9 <= 0.5 x7 + 1 + x9.lb = 0 + x9.ub = 0.5 ( 0.5x0 + 0.5x1 + 1 ) + 1 = 0.25x0 + 0.25x1 + 1.5 + x9 range: [0, 2] + + Layer 5: + x10 = x8 + x9 + 1 + x10.lb = 1 ( 0 ) + 1 ( 0 ) + 1 = 1 : [1, 1] + x10.ub = 1 ( x6 ) + 1 ( 0.5 x7 + 1 ) + 1 = 1 ( x4 + x5 ) + 1 ( 0.5 x4 - 0.5x5 + 1 ) + 1 + = 1.5x4 + 0.5x5 + 2 <= 0.75x2 + 0.25x3 + 4 = x0 + 0.5x1 + 4 : [2.5, 5.5] + x10 range: [1, 5.5] + + x11 = x9 + x11.lb = 0 + x11.ub = 0.25x0 + 0.25x1 + 1.5 + x11 range: [0, 2] + + */ + + List expectedBounds( + { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, 1, Tightening::LB ), Tightening( 10, 5.5, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 2, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_deeppoly_abs_all_positive2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + 
TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) );
+
+        /*
+          Input ranges:
+
+          x0: [4, 6]
+          x1: [1, 5]
+
+          Layers 1, 2:
+
+          x2 = 2x0 + 3x1
+          x2.lb = 2x0 + 3x1 : [11, 27]
+          x2.ub = 2x0 + 3x1 : [11, 27]
+
+          x3 = x0 + x1
+          x3.lb = x0 + x1 : [5, 11]
+          x3.ub = x0 + x1 : [5, 11]
+
+          Both absolute values positive, bounds survive through activations:
+
+          x2 <= x4 <= x2
+          x4.lb = 2x0 + 3x1 : [11, 27]
+          x4.ub = 2x0 + 3x1 : [11, 27]
+
+          x3 <= x5 <= x3
+          x5.lb = x0 + x1 : [5, 11]
+          x5.ub = x0 + x1 : [5, 11]
+
+          Layer 3:
+          x6 = x4 - x5
+          => x2 - x3 <= x6 <= x2 - x3
+          x6.lb = x0 + 2x1 : [6, 16]
+          x6.ub = x0 + 2x1 : [6, 16]
+        */
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, 11, Tightening::LB ),
+            Tightening( 2, 27, Tightening::UB ),
+            Tightening( 3, 5, Tightening::LB ),
+            Tightening( 3, 11, Tightening::UB ),
+
+            Tightening( 4, 11, Tightening::LB ),
+            Tightening( 4, 27, Tightening::UB ),
+            Tightening( 5, 5, Tightening::LB ),
+            Tightening( 5, 11, Tightening::UB ),
+
+            Tightening( 6, 6, Tightening::LB ),
+            Tightening( 6, 16, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+    }
+
+    void test_deeppoly_abs_positive_and_negative2()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        tableau.getBoundManager().initialize( 7 );
+        nlr.setTableau( &tableau );
+
+        // Create the layers
+        nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
+        nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 );
+        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );
+
+        // Mark layer dependencies
+        for ( unsigned i = 1; i <= 3; ++i )
+            nlr.addLayerDependency( i - 1, i );
+
+        // Weights
+        nlr.setWeight( 0, 0, 1, 0, 2 );
+        nlr.setWeight( 0, 0, 1, 1, 1 );
+        nlr.setWeight( 0, 1, 1, 0, 3 );
+        nlr.setWeight( 0, 1, 1, 1, 1 );
+        nlr.setWeight( 2, 0, 3, 0, 1 );
+        nlr.setWeight( 2, 1, 3, 0, -1 );
+
+        // Mark the Abs sources
+        nlr.addActivationSource( 1, 0, 2, 0 );
+        nlr.addActivationSource( 1, 1, 2, 1 );
+
+        // Variable indexing
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
+
+        // Very loose bounds for neurons except inputs
+        double large = 1000000;
+
+        tableau.setLowerBound( 2, -large );
+        tableau.setUpperBound( 2, large );
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+
+        tableau.setLowerBound( 0, 4 );
+        tableau.setUpperBound( 0, 6 );
+        tableau.setLowerBound( 1, 1 );
+        tableau.setUpperBound( 1, 5 );
+
+        // Strong negative bias for x2, which is node (1,0)
+        nlr.setBias( 1, 0, -30 );
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) );
+
+        /*
+          Input ranges:
+
+          x0: [4, 6]
+          x1: [1, 5]
+
+          Layers 1, 2:
+          x2 = 2x0 + 3x1 - 30
+          x2.lb = 2x0 + 3x1 - 30 : [-19, -3]
+          x2.ub = 2x0 + 3x1 - 30 : [-19, -3]
+
+          x3 = x0 + x1
+          x3.lb = x0 + x1 : [5, 11]
+          x3.ub = x0 + x1 : [5, 11]
+
+          First absolute value is negative, bounds get flipped
+          Second absolute value is positive, bounds survive the activation
+
+          -x2 <= x4 <= -x2
+          x4.lb = -2x0 -3x1 + 30 : [3, 19]
+          x4.ub = -2x0 -3x1 + 30 : [3, 19]
+
+          x3 <= x5 <= x3
+          x5.lb = x0 + x1 : [5, 11]
+          x5.ub = x0 + x1 : [5, 11]
+
+          Layer 3:
+          x6 = x4 - x5
+          => -x2 - x3 <= x6 <= -x2 - x3
+          x6.lb = - 3x0 - 4x1 + 30 : [-8, 14]
+          x6.ub = - 3x0 - 4x1 + 30 : [-8, 14]
+        */
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, -19, Tightening::LB ),
+            Tightening( 2, -3, Tightening::UB ),
+            Tightening( 3, 5, Tightening::LB ),
+            Tightening( 3, 11, Tightening::UB ),
+
+            Tightening( 4, 3, Tightening::LB ),
+            Tightening( 4, 19, Tightening::UB ),
+            Tightening( 5, 5, Tightening::LB ),
+            Tightening( 5, 11, Tightening::UB ),
+
+            Tightening( 6, -8, Tightening::LB ),
+            Tightening( 6, 14, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+    }
+
+    void test_deeppoly_absolute_values_positive_and_not_fixed2()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        tableau.getBoundManager().initialize( 7 );
+        nlr.setTableau( &tableau );
+
+        // Create the layers
+        nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
+        nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 );
+        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );
+
+        // Mark layer dependencies
+        for ( unsigned i = 1; i <= 3; ++i )
+            nlr.addLayerDependency( i - 1, i );
+
+        // Weights
+        nlr.setWeight( 0, 0, 1, 0, 2 );
+        nlr.setWeight( 0, 0, 1, 1, 1 );
+        nlr.setWeight( 0, 1, 1, 0, 3 );
+        nlr.setWeight( 0, 1, 1, 1, 1 );
+        nlr.setWeight( 2, 0, 3, 0, 1 );
+        nlr.setWeight( 2, 1, 3, 0, -1 );
+
+        // Mark the Abs sources
+        nlr.addActivationSource( 1, 0, 2, 0 );
+        nlr.addActivationSource( 1, 1, 2, 1 );
+
+        // Variable indexing
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
+
+        // Very loose bounds for neurons except inputs
+        double large = 1000000;
+
+        tableau.setLowerBound( 2, -large );
+        tableau.setUpperBound( 2, large );
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
+        tableau.setLowerBound( 6, -large );
+        tableau.setUpperBound( 6, large );
+
+        tableau.setLowerBound( 0, 4 );
+        tableau.setUpperBound( 0, 6 );
+        tableau.setLowerBound( 1, 1 );
+        tableau.setUpperBound( 1, 5 );
+
+        // Strong negative bias for x2, which is node (1,0)
+        nlr.setBias( 1, 0, -15 );
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) );
+
+        /*
+          Input ranges:
+
+          x0: [4, 6]
+          x1: [1, 5]
+
+          Layers 1, 2:
+          x2 = 2x0 + 3x1 - 15
+          x2.lb = 2x0 + 3x1 - 15 : [-4, 12]
+          x2.ub = 2x0 + 3x1 - 15 : [-4, 12]
+
+          x3 = x0 + x1
+          x3.lb = x0 + x1 : [5, 11]
+          x3.ub = x0 + x1 : [5, 11]
+
+          First absolute value is undecided, bounds are concretized.
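+          ( Added note, not in the original derivation: with x2 in [l, u] =
+            [-4, 12] straddling zero, the absolute value is concretized to the
+            interval [0, max( -l, u )] = [0, 12], which is what the next lines
+            use. )
+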
+ Second absolute value is active, bounds surive the activation + + 0 <= x4 <= 12 + x4 range: [0, 12] + x4.lb = 0 + x4.ub = 12 + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + => -x3 <= x6 <= -x3 + 12 + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 + 12 : [ 1, 7] + + x6 range: [-11, 7] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, 7, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_deeppoly_absolute_values_active_and_externally_fixed2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
+ nlr.setBias( 1, 0, -15 ); + + // However, the weighted sum variable has been eliminated + nlr.eliminateVariable( 2, -3 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = -3 + x2 is eliminated, everything set to -3 + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First absolute value is negative, bounds get flipped + Second absolute value is positive, bounds surive the activation + + -x2 <= x4 <= -x2 + x4: all set to 3 + + x3 <= x5 <= x3 + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 3: + + x6 = x4 - x5 + => -x2 - x3 <= x6 <= -x2 - x3 + => -x3 + 3 <= x6 <= -x3 + 3 + x6.lb = - x0 - x1 + 3 : [-8, -2] + x6.ub = - x0 - x1 + 3 : [-8, -2] + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, -2, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_deeppoly_signs_positive_and_not_fixed2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( 
nlr.parameterisedDeepPoly( false ) ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layers 1, 2: + + x2 = 2x0 + 3x1 - 15 + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3 = x0 + x1 + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First sign is undecided, bounds are concretized. + Second sign is active, bounds become constant 1 + Coefficient (first Sign, lower): 2/12 = 1/6. + Coefficient (first Sign, upper): -2/-4 = 1/2. + + 1/6 x2 - 1 <= x4 <= 1/2 x2 + 1 + x4.lb = 1/6 ( 2x0 + 3x1 - 15 ) - 1 = 2/6 x0 + 3/6 x1 - 21/6 + x4.ub = 1/2 ( 2x0 + 3x1 - 15 ) + 1 = x0 + 1.5x1 - 6.5 + x4 range: [-1, 1] + + 1 <= x5 <= 1 + x5.lb = 1 + x5.ub = 1 + x5 range: [1, 1] + + Layer 3: + + x6 = x4 - x5 : [-2, 0] + => 1/6 x2 - 2 <= x6 <= 1/2 x2 : [-8/3, 6] + x6.lb = 1 ( 2/6 x0 + 3/6 x1 - 21/6 ) - 1 ( 1 ) = 1/3 x0 + 1/2 x1 - 4.5 : [-16/6, 0] + x6.ub = 1 ( x0 + 1.5x1 - 6.5 ) - 1 ( 1 ) = x0 + 1.5x1 - 7.5 : [-2, 6] + + x6 range: [-2, 0] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), + Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), + Tightening( 6, 0, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_deeppoly_signs_active_and_externally_fixed2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). 
Should make the node unfixed.
+        nlr.setBias( 1, 0, -15 );
+
+        // However, the weighted sum variable has been eliminated
+        nlr.eliminateVariable( 2, -3 );
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) );
+
+        /*
+          Input ranges:
+
+          x0: [4, 6]
+          x1: [1, 5]
+
+          Layers 1, 2:
+
+          x2 = -3
+          x2 is eliminated, everything set to -3
+
+          x3 = x0 + x1
+          x3.lb = x0 + x1 : [5, 11]
+          x3.ub = x0 + x1 : [5, 11]
+
+          First sign is negative, bounds become constant -1
+          Second sign is positive, bounds become constant 1
+
+          -1 <= x4 <= -1
+          x4: all set to -1
+
+          1 <= x5 <= 1
+          x5: all set to 1
+
+          Layer 3:
+
+          x6 = x4 - x5
+          x6.lb = 1 ( -1 ) - 1 ( 1 ) = -2
+          x6.ub = 1 ( -1 ) - 1 ( 1 ) = -2
+        */
+
+        List<Tightening> expectedBounds( {
+            // x2 does not appear, because it has been eliminated
+
+            Tightening( 3, 5, Tightening::LB ),
+            Tightening( 3, 11, Tightening::UB ),
+
+            Tightening( 4, -1, Tightening::LB ),
+            Tightening( 4, -1, Tightening::UB ),
+            Tightening( 5, 1, Tightening::LB ),
+            Tightening( 5, 1, Tightening::UB ),
+
+            Tightening( 6, -2, Tightening::LB ),
+            Tightening( 6, -2, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+    }
+
+    void test_deeppoly_leaky_relu2()
+    {
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkDeepPolyLeakyReLU( nlr, tableau ); // alpha = 0.2
+
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 1 );
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) );
+
+        /*
+          Input ranges:
+
+          x0: [-1, 1]
+          x1: [-1, 1]
+
+          Layers 1, 2:
+
+          x2 = x0 + x1
+          x2.lb = x0 + x1 : [-2, 2]
+          x2.ub = x0 + x1 : [-2, 2]
+
+          x3 = x0 - x1
+          x3.lb = x0 - x1 : [-2, 2]
+          x3.ub = x0 - x1 : [-2, 2]
+
+          Both LeakyReLUs are undecided, bounds are concretized.
+          Coefficient: ( 2 - 0.2*-2 )/( 2--2 ) = 2.4/4 = 0.6
+          Bias: ( 0.2 - 1 ) * 2 * -2 / ( 2--2 ) = 0.8
+
+          x2 <= x4 <= 0.6 x2 + 0.8
+          x4.lb = x0 + x1
+          x4.ub = 0.6 ( x0 + x1 ) + 0.8 = 0.6x0 + 0.6x1 + 0.8
+          x4 range: [-2, 2]
+
+          x3 <= x5 <= 0.6 x3 + 0.8
+          x5.lb = x0 - x1
+          x5.ub = 0.6 ( x0 - x1 ) + 0.8 = 0.6x0 - 0.6x1 + 0.8
+          x5 range: [-2, 2]
+
+          Layers 3, 4:
+
+          x6 = x4 + x5
+          => x2 + x3 <= x6 <= 0.6 x2 + 0.6 x3 + 1.6
+          x6.lb = 1 ( x0 + x1 ) + 1 ( x0 - x1 ) = 2x0 : [-2, 2]
+          x6.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) + 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 1.2x0 + 1.6 : [0.4, 2.8]
+          x6 range: [-2, 2.8]
+
+          x7 = x4 - x5
+          => x2 - 0.6x3 - 0.8 <= x7 <= 0.6 x2 - x3 + 0.8
+          x7.lb = 1 ( x0 + x1 ) - 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2]
+          x7.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) - 1 ( x0 - x1 ) = -0.4x0 + 1.6x1 + 0.8 : [-1.2, 2.8]
+          x7 range: [-2.8, 2.8]
+
+          Both LeakyReLUs are undecided, bounds are concretized.
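+
+          ( General rule, added as a reading aid: for a LeakyReLU with slope
+            alpha and input bounds [l, u], l < 0 < u, the upper line is
+            y <= lambda x + mu with lambda = ( u - alpha*l )/( u - l ) and
+            mu = ( alpha - 1 ) * u * l / ( u - l ); both coefficient
+            computations below are instances of these two formulas. )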
+ Coefficient (first LeakyReLU): ( 2.8 - 0.2*-2 )/( 2.8--2 ) = 3.2/4.8 = 10/15 = 2/3 + Bias (first LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2 / ( 2.8--2 ) = 14/15 + + Coefficient (second LeakyReLU): ( 2.8 - 0.2*-2.8 )/( 2.8--2.8 ) = 3.36/5.6 = 0.6 + Bias (second LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2.8 / ( 2.8--2.8 ) = 1.12 + + x6 <= x8 <= 10/15 x6 + 14/15 + x8.lb = 2x0 + x8.ub = 10/15 ( 1.2x0 + 1.6 ) + 14/15 = 0.8x0 + 2 + x8 range: [-2, 2.8] + + x7 <= x9 <= 0.6x7 + 1.12 + x9.lb = 0.4x0 + 1.6x1 - 0.8 + x9.ub = 0.6 ( -0.4x0 + 1.6x1 + 0.8 ) + 1.12 = -0.24 x0 + 0.96 x1 + 1.6 + x9 range: [-0.56, 2.8] + + Layer 5: + + x10 = x8 + x9 + 1 + => x6 + x7 + 1 <= x10 <= 2/3 x6 + 0.6 x7 + 229/75 + => 2x4 + 1 <= x10 <= 19/15 x4 + 1/15 x5 + 229/75 + => 2x2 + 1 <= x10 <= 0.76 x2 + 0.04 x3 + 4.12 + x10.lb = 2x0 + 2x1 + 1 : [-3, 5] + x10.ub = 0.8 x0 + 0.72 x1 + 4.12 : [2.6, 5.64] + x10 range: [-3, 5.64] + + x11 = x9 + => x7 <= x11 <= 0.6x7 + 1.12 + => x4 - x5 <= x11 <= 0.6x4 - 0.6x5 + 1.12 + => x2 - 0.6x3 - 0.8 <= x11 <= 0.36 x2 - 0.6 x3 + 1.6 + x11.lb = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2] + x11.ub = -0.24 x0 + 0.96 x1 + 1.6 : [0.4, 2.8] + x11 range: [-2.8, 2.8] + */ + + List expectedBounds( + { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -2, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -2, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2.8, Tightening::UB ), + Tightening( 7, -2.8, Tightening::LB ), Tightening( 7, 2.8, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 2.8, Tightening::UB ), + Tightening( 9, -2.8, Tightening::LB ), Tightening( 9, 2.8, Tightening::UB ), + + Tightening( 10, -3, Tightening::LB ), Tightening( 10, 5.64, Tightening::UB ), + Tightening( 11, -2.8, Tightening::LB ), Tightening( 11, 2.8, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_deeppoly_sigmoids_and_round2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolySigmoidsAndRound( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + // Layer 1 + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 0 ), -2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 0 ), 2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 1 ), -2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 1 ), 2, 0.00001 ) ); + + // Layer 2 + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 0 ), 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 0 ), 0.8807, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 1 ), 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 1 ), 0.8807, 0.0001 ) ); + + // Layer 3 + /* + Double-check with Python + --- + from math import exp as e + def g(x): 
+ return 1 / (1 + e(-x)) + + def g_prime(x): + return g(x) * (1 - g(x)) + + def lam(l, u): + return (g(u) - g(l)) / (u - l) + + def lam_prime(l, u): + return min(g_prime(l), g_prime(u)) + + l3 = l4 = -2 + u3 = u4 = 2 + l5 = l6 = g(-2) + u5 = u6 = g(2) + lambda7 = lam(l3, u3) + lambda7_prime = lam_prime(l3, u3) + lambda8 = lam(l4, u4) + lambda8_prime = lam_prime(l4, u4) + x7_l = lambda7_prime * (-2) + g(-2) + g(-2) - lambda7_prime * (-2 + -2) + x7_u = lambda7_prime * (2) + g(2) + g(2) -lambda7_prime * (2 + 2) + x8_l = lambda8_prime * (-2) + g(-2) - g(2) - lambda8_prime * (-2 - 2) + x8_u = lambda8_prime * (2) + g(2) - g(-2) -lambda8_prime * (2 - -2) + print(x7_l) + print(x7_u) + print(x8_l) + print(x8_u) + + ''' + Sigmoid linear relaxation ( Layer 2 ): + x4 >= lambda7_prime * x2 + ( g(l3) - lambda7_prime * l3 ) + x4 <= lambda7_prime * x2 + ( g(u3) - lambda7_prime * u3 ) + x5 >= lambda8_prime * x3 + ( g(l4) - lambda8_prime * l4 ) + x5 <= lambda8_prime * x3 + ( g(u4) - lambda7_prime * u4 ) + ''' + print('------------------') + print(lambda7_prime) + print(lambda8_prime) + print(g(l3) - lambda7_prime * l3) + print(g(u3) - lambda7_prime * u3) + print(g(l4) - lambda8_prime * l4) + print(g(u4) - lambda8_prime * u4) + + --- + [output]: + 0.4483930148512481 + 1.5516069851487517 + -0.5516069851487517 + 0.5516069851487517 + ------------------ + 0.1049935854035065 + 0.1049935854035065 + 0.3291900928291306 + 0.6708099071708693 + 0.3291900928291306 + 0.6708099071708693 + */ + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 0 ), 0.4483, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 0 ), 1.5516, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 1 ), -0.5516, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 1 ), 0.5516, 0.0001 ) ); + + // Layer 4 + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 0 ), 0 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 0 ), 2 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 1 ), -1 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 1 ), 1 ); + } + + void test_deeppoly_max_not_fixed2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolyMax( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) ); + + /* + Input ranges: + + x0: [-1, 1] + x1: [-1, 2] + + Layers 1, 2, 3: + + x2 = x0 + x1 + x2.lb = x0 + x1 : [-2, 3] + x2.ub = x0 + x1 : [-2, 3] + + x3 = x0 - x1 + x3.lb = x0 - x1 : [-3, 2] + x3.ub = x0 - x1 : [-3, 2] + + Both ReLUs are undecided, bounds are concretized. + First ReLU: 3 = ub > -lb = 2, using lower ReLU coefficient of 1. + Upper coefficient (first ReLU): 3/( 3--2 ) = 3/5 = 0.6. + First ReLU: 2 = ub <= -lb = 3, using lower ReLU coefficient of 0. + Upper coefficient (second ReLU): 2/( 2--3 ) = 2/5 = 0.4 + + x2 <= x4 <= 0.6 x2 + 1.2 + x4.lb = x0 + x1 + x4.ub = 0.6 ( x0 + x1 ) + 1.2 = 0.6x0 + 0.6x1 + 1.2 + x4 range: [-2, 3] + + 0 <= x5 <= 0.4 x3 + 1.2 + x5.lb = 0 + x5.ub = 0.4 ( x0 - x1 ) + 1.2 = 0.4x0 + 0.4x1 + 1.2 + x5 range: [0, 2] + + Max is not fixed because x5.lb <= x4.ub and x4.lb <= x5.ub + Max inherits lower bound from x5, and its upper bound is constant 3. 
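+
+          ( Added note: the "not fixed" case uses the source with the larger
+            concrete lower bound, here x5 with x5.lb = 0 >= x4.lb = -2, for the
+            symbolic lower bound, and the constant max( x4.ub, x5.ub ) =
+            max( 3, 2 ) = 3 for the upper bound, as derived below. )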
+
+          x5 <= x6 <= 3
+          x6.lb = 0 : [0, 0]
+          x6.ub = 3 : [3, 3]
+          x6 range: [0, 3]
+
+          Layer 4:
+
+          x7 = 2x6
+          => 2x5 <= x7 <= 6
+          x7.lb = 2 ( 0 ) = 0 : [0, 0]
+          x7.ub = 2 ( 3 ) = 6 : [6, 6]
+          x7 range: [0, 6]
+        */
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, -2, Tightening::LB ),
+            Tightening( 2, 3, Tightening::UB ),
+            Tightening( 3, -3, Tightening::LB ),
+            Tightening( 3, 2, Tightening::UB ),
+            Tightening( 4, -2, Tightening::LB ),
+            Tightening( 4, 3, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ),
+            Tightening( 5, 2, Tightening::UB ),
+            Tightening( 6, 0, Tightening::LB ),
+            Tightening( 6, 3, Tightening::UB ),
+            Tightening( 7, 0, Tightening::LB ),
+            Tightening( 7, 6, Tightening::UB ),
+
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+    }
+
+    void test_deeppoly_max_fixed2()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkDeepPolyMax( nlr, tableau );
+
+        tableau.setLowerBound( 0, 1 );
+        tableau.setUpperBound( 0, 2 );
+        tableau.setLowerBound( 1, -3 );
+        tableau.setUpperBound( 1, -2 );
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) );
+
+        /*
+          Input ranges:
+
+          x0: [1, 2]
+          x1: [-3, -2]
+
+          Layer 1:
+
+          x2 = x0 + x1
+          x2.lb = x0 + x1 : [-2, 0]
+          x2.ub = x0 + x1 : [-2, 0]
+
+          x3 = x0 - x1
+          x3.lb = x0 - x1 : [3, 5]
+          x3.ub = x0 - x1 : [3, 5]
+
+          First ReLU is inactive, bounds become constant 0
+          Second ReLU is active, bounds survive the activation
+
+          0 <= x4 <= 0
+          x4: all set to 0
+
+          x3 <= x5 <= x3
+          x5.lb = x0 - x1 : [3, 5]
+          x5.ub = x0 - x1 : [3, 5]
+
+          Max is fixed because x5.lb > x4.ub, so it inherits x5's bounds
+
+          x5 <= x6 <= x5
+          => x3 <= x6 <= x3
+          x6.lb = x0 - x1 : [3, 5]
+          x6.ub = x0 - x1 : [3, 5]
+
+          Layer 4:
+
+          x7 = 2x6
+          => x7 = 2x5 = 2x3 = 2x0 - 2x1
+          x7.lb = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10]
+          x7.ub = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10]
+        */
+
+        List<Tightening> expectedBounds( {
+            Tightening( 2, -2, Tightening::LB ),
+            Tightening( 2, 0, Tightening::UB ),
+            Tightening( 3, 3, Tightening::LB ),
+            Tightening( 3, 5, Tightening::UB ),
+            Tightening( 4, 0, Tightening::LB ),
+            Tightening( 4, 0, Tightening::UB ),
+            Tightening( 5, 3, Tightening::LB ),
+            Tightening( 5, 5, Tightening::UB ),
+            Tightening( 6, 3, Tightening::LB ),
+            Tightening( 6, 5, Tightening::UB ),
+            Tightening( 7, 6, Tightening::LB ),
+            Tightening( 7, 10, Tightening::UB ),
+
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+    }
+
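+    /*
+      Reading aid, not part of the original test suite: the two Max tests above
+      exercise both cases of the DeepPoly Max relaxation. A minimal sketch of
+      the case split, with hypothetical names:
+
+          // Sources with concrete bounds [ lbs[i], ubs[i] ]. Picks the source
+          // with the largest lower bound; the max is "fixed" iff that bound
+          // dominates every other source's upper bound.
+          static void deepPolyMaxBounds( const Vector<double> &lbs,
+                                         const Vector<double> &ubs,
+                                         unsigned &maxLbIndex,
+                                         bool &fixed,
+                                         double &constantUb )
+          {
+              maxLbIndex = 0;
+              for ( unsigned i = 1; i < lbs.size(); ++i )
+                  if ( lbs[i] > lbs[maxLbIndex] )
+                      maxLbIndex = i;
+
+              fixed = true;
+              constantUb = ubs[maxLbIndex];
+              for ( unsigned i = 0; i < ubs.size(); ++i )
+              {
+                  if ( i == maxLbIndex )
+                      continue;
+                  if ( ubs[i] > lbs[maxLbIndex] )
+                      fixed = false; // another source can still win
+                  if ( ubs[i] > constantUb )
+                      constantUb = ubs[i];
+              }
+          }
+
+      If fixed, the max inherits source maxLbIndex's symbolic bounds (as in
+      test_deeppoly_max_fixed2, where x5 in [3, 5] dominates x4 in [0, 0]);
+      otherwise it keeps that source's symbolic lower bound and the constant
+      upper bound constantUb (as in test_deeppoly_max_not_fixed2).
+    */
+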
"lse" ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkDeepPolySoftmax( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.000001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.000001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.000001 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) ); + + /* + Input ranges: + + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] + + Layer 1: + + x3 = x0 - x1 + x2 + 1 + x3.lb = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ] + x3.ub = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ] + x3 range: [ 1.999999, 2.000002 ] + + x4 = -x0 + x1 + x2 + 2 + x4.lb = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ] + x4.ub = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ] + x4 range: [ 2.999999, 3.000002 ] + + x5 = -x0 - x1 - x2 + 3 + x5.lb = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ] + x5.ub = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ] + x5 range: [ -0.000003, 0 ] + */ + + unsigned size = nlr.getLayer( 2 )->getSize(); + Vector sourceLbs = { 1.999899, 2.999899, -0.000003 }; + Vector sourceUbs = { 2.000102, 3.000102, 0.0001 }; + Vector sourceMids = { 2.0000005, 3.0000005, -0.0000015 }; + Vector targetLbs( size, 0 ); + Vector targetUbs( size, 0 ); + Vector symbolicLb( size * size, 0 ); + Vector symbolicUb( size * size, 0 ); + Vector symbolicLowerBias( size, 0 ); + Vector symbolicUpperBias( size, 0 ); + for ( unsigned i = 0; i < size; ++i ) + { + targetLbs[i] = NLR::Layer::linearLowerBound( sourceLbs, sourceUbs, i ); + targetUbs[i] = NLR::Layer::linearUpperBound( sourceLbs, sourceUbs, i ); + } + for ( unsigned i = 0; i < size; ++i ) + { + symbolicLowerBias[i] = + NLR::Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, i ); // Using lse2 + symbolicUpperBias[i] = + NLR::Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, i ); + for ( unsigned j = 0; j < size; ++j ) + { + symbolicLb[size * j + i] = + NLR::Layer::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, i, j ); + symbolicUb[size * j + i] = + NLR::Layer::dLSEUpperbound( sourceMids, targetLbs, targetUbs, i, j ); + symbolicLowerBias[i] -= symbolicLb[size * j + i] * sourceMids[j]; + symbolicUpperBias[i] -= symbolicUb[size * j + i] * sourceMids[j]; + } + } + TS_ASSERT( compareVectors( targetLbs, Vector( { 0.2595, 0.7054, 0.0351 } ) ) ); + TS_ASSERT( compareVectors( targetUbs, Vector( { 0.2595, 0.7054, 0.0351 } ) ) ); + TS_ASSERT( compareVectors( symbolicLb, + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ) ) ); + TS_ASSERT( compareVectors( symbolicUb, + Vector( { 0.1922, + -0.1830, + -0.0091, + -0.1830, + 0.2078, + -0.0248, + -0.0091, + -0.0248, + 0.0339 } ) ) ); + TS_ASSERT( + compareVectors( symbolicLowerBias, Vector( { 0.4243, 0.4481, 0.1277 } ) ) ); + TS_ASSERT( + compareVectors( symbolicUpperBias, Vector( { 0.4243, 0.4480, 0.1277 } ) ) ); + + /* + Layer 2: + +0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 + x6.lb = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232 + x6.ub = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232 + x6 range: [ 0.2595, 0.2595 ] + +-0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4480 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 + x7.lb = -0.3660 x0 - 0.4156 x1 + 0.0496 x2 + 0.6062 + x7.ub = -0.3660 x0 - 0.4156 x1 + 0.0496 x2 + 0.6063 + x7 range: [ 0.7054, 0.7054 ] + +-0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 
+            /*
+              Layer 2:
+
+              0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243
+              x6.lb = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232
+              x6.ub = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232
+              x6 range: [ 0.2595, 0.2595 ]
+
+              -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4480 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481
+              x7.lb = -0.3660 x0 + 0.4156 x1 + 0.0496 x2 + 0.6062
+              x7.ub = -0.3660 x0 + 0.4156 x1 + 0.0496 x2 + 0.6063
+              x7 range: [ 0.7054, 0.7054 ]
+
+              -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277
+              x8.lb = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707
+              x8.ub = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707
+              x8 range: [ 0.0351, 0.0351 ]
+
+              Layer 3:
+
+              x9 = x6 + x7 + x8
+              => x9 = ( 0.1922 - 0.1830 - 0.0091 ) x3 + ( -0.1830 + 0.2078 - 0.0248 ) x4
+                      + ( -0.0091 - 0.0248 + 0.0339 ) x5 + ( 0.4243 + 0.4481 + 0.1277 )
+
+              => x9 = 0.0001 x3 + 0 x4 + 0 x5 + 1.0001
+              => ( Up to rounding ) 1 <= x9 <= 1.
+              x9.lb = 1
+              x9.ub = 1
+              x9 range: [ 1, 1 ]
+
+              x10 = - x6 - x7 - x8
+              => x10 = - ( 0.1922 - 0.1830 - 0.0091 ) x3 - ( -0.1830 + 0.2078 - 0.0248 ) x4
+                       - ( -0.0091 - 0.0248 + 0.0339 ) x5 - ( 0.4243 + 0.4481 + 0.1277 )
+
+              => x10 = - 0.0001 x3 - 0.0000 x4 - 0.0000 x5 - 1.0001
+              => ( Up to rounding ) -1 <= x10 <= -1.
+              x10.lb = -1
+              x10.ub = -1
+              x10 range: [ -1, -1 ]
+            */
+
+            List<Tightening> expectedBounds( { Tightening( 3, 2, Tightening::LB ),
+                                               Tightening( 3, 2, Tightening::UB ),
+                                               Tightening( 4, 3, Tightening::LB ),
+                                               Tightening( 4, 3, Tightening::UB ),
+                                               Tightening( 5, 0, Tightening::LB ),
+                                               Tightening( 5, 0, Tightening::UB ),
+                                               Tightening( 6, 0.2595, Tightening::LB ),
+                                               Tightening( 6, 0.2595, Tightening::UB ),
+                                               Tightening( 7, 0.7054, Tightening::LB ),
+                                               Tightening( 7, 0.7054, Tightening::UB ),
+                                               Tightening( 8, 0.0351, Tightening::LB ),
+                                               Tightening( 8, 0.0351, Tightening::UB ),
+                                               Tightening( 9, 1, Tightening::LB ),
+                                               Tightening( 9, 1, Tightening::UB ),
+                                               Tightening( 10, -1, Tightening::LB ),
+                                               Tightening( 10, -1, Tightening::UB )
+
+            } );
+
+            List<Tightening> bounds;
+            TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+            TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+        }
+        {
+            Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "er" );
+            NLR::NetworkLevelReasoner nlr;
+            MockTableau tableau;
+            nlr.setTableau( &tableau );
+            populateNetworkDeepPolySoftmax( nlr, tableau );
+
+            tableau.setLowerBound( 0, 1 );
+            tableau.setUpperBound( 0, 1.000001 );
+            tableau.setLowerBound( 1, 1 );
+            tableau.setUpperBound( 1, 1.000001 );
+            tableau.setLowerBound( 2, 1 );
+            tableau.setUpperBound( 2, 1.000001 );
+
+            // Invoke DeepPoly
+            TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+            TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) );
+
+            /*
+              Input ranges:
+
+              x0: [1, 1.000001]
+              x1: [1, 1.000001]
+              x2: [1, 1.000001]
+
+              Layer 1:
+
+              x3 = x0 - x1 + x2 + 1
+              x3.lb = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ]
+              x3.ub = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ]
+              x3 range: [ 1.999999, 2.000002 ]
+
+              x4 = -x0 + x1 + x2 + 2
+              x4.lb = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ]
+              x4.ub = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ]
+              x4 range: [ 2.999999, 3.000002 ]
+
+              x5 = -x0 - x1 - x2 + 3
+              x5.lb = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ]
+              x5.ub = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ]
+              x5 range: [ -0.000003, 0 ]
+            */
+
+            unsigned size = nlr.getLayer( 2 )->getSize();
+            Vector<double> sourceLbs = { 1.999899, 2.999899, -0.000003 };
+            Vector<double> sourceUbs = { 2.000102, 3.000102, 0.0001 };
+            Vector<double> sourceMids = { 2.0000005, 3.0000005, -0.0000015 };
+            Vector<double> targetLbs( size, 0 );
+            Vector<double> targetUbs( size, 0 );
+            Vector<double> symbolicLb( size * size, 0 );
+            Vector<double> symbolicUb( size * size, 0 );
+            Vector<double> symbolicLowerBias( size, 0 );
+            Vector<double> symbolicUpperBias( size, 0 );
+            for ( unsigned i = 0; i < size; ++i )
+            {
+                targetLbs[i] = NLR::Layer::linearLowerBound( sourceLbs, sourceUbs, i );
+                targetUbs[i] = NLR::Layer::linearUpperBound( sourceLbs, sourceUbs, i );
+            }
+            for ( unsigned i = 0; i < size; ++i )
+            {
+                symbolicLowerBias[i] =
+                    NLR::Layer::ERLowerBound( sourceMids,
sourceLbs, sourceUbs, i ); // Using er
+                symbolicUpperBias[i] =
+                    NLR::Layer::ERUpperBound( sourceMids, targetLbs, targetUbs, i );
+                for ( unsigned j = 0; j < size; ++j )
+                {
+                    symbolicLb[size * j + i] =
+                        NLR::Layer::dERLowerBound( sourceMids, sourceLbs, sourceUbs, i, j );
+                    symbolicUb[size * j + i] =
+                        NLR::Layer::dERUpperBound( sourceMids, targetLbs, targetUbs, i, j );
+                    symbolicLowerBias[i] -= symbolicLb[size * j + i] * sourceMids[j];
+                    symbolicUpperBias[i] -= symbolicUb[size * j + i] * sourceMids[j];
+                }
+            }
+            TS_ASSERT( compareVectors( targetLbs, Vector<double>( { 0.2595, 0.7054, 0.0351 } ) ) );
+            TS_ASSERT( compareVectors( targetUbs, Vector<double>( { 0.2595, 0.7054, 0.0351 } ) ) );
+            TS_ASSERT( compareVectors( symbolicLb,
+                                       Vector<double>( { 0.1922,
+                                                         -0.1830,
+                                                         -0.0091,
+                                                         -0.1830,
+                                                         0.2078,
+                                                         -0.0248,
+                                                         -0.0091,
+                                                         -0.0248,
+                                                         0.0339 } ) ) );
+            TS_ASSERT( compareVectors( symbolicUb,
+                                       Vector<double>( { 0.1922,
+                                                         -0.1830,
+                                                         -0.0091,
+                                                         -0.1830,
+                                                         0.2078,
+                                                         -0.0248,
+                                                         -0.0091,
+                                                         -0.0248,
+                                                         0.0339 } ) ) );
+            TS_ASSERT(
+                compareVectors( symbolicLowerBias, Vector<double>( { 0.4243, 0.4481, 0.1277 } ) ) );
+            TS_ASSERT(
+                compareVectors( symbolicUpperBias, Vector<double>( { 0.4243, 0.4480, 0.1277 } ) ) );
+
+            /*
+              Layer 2:
+
+              0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243
+              x6.lb = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232
+              x6.ub = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232
+              x6 range: [ 0.2595, 0.2595 ]
+
+              -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4480 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481
+              x7.lb = -0.3660 x0 + 0.4156 x1 + 0.0496 x2 + 0.6062
+              x7.ub = -0.3660 x0 + 0.4156 x1 + 0.0496 x2 + 0.6063
+              x7 range: [ 0.7054, 0.7054 ]
+
+              -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277
+              x8.lb = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707
+              x8.ub = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707
+              x8 range: [ 0.0351, 0.0351 ]
+
+              Layer 3:
+
+              x9 = x6 + x7 + x8
+              => x9 = ( 0.1922 - 0.1830 - 0.0091 ) x3 + ( -0.1830 + 0.2078 - 0.0248 ) x4
+                      + ( -0.0091 - 0.0248 + 0.0339 ) x5 + ( 0.4243 + 0.4481 + 0.1277 )
+
+              => x9 = 0.0001 x3 + 0 x4 + 0 x5 + 1.0001
+              => ( Up to rounding ) 1 <= x9 <= 1.
+              x9.lb = 1
+              x9.ub = 1
+              x9 range: [ 1, 1 ]
+
+              x10 = - x6 - x7 - x8
+              => x10 = - ( 0.1922 - 0.1830 - 0.0091 ) x3 - ( -0.1830 + 0.2078 - 0.0248 ) x4
+                       - ( -0.0091 - 0.0248 + 0.0339 ) x5 - ( 0.4243 + 0.4481 + 0.1277 )
+
+              => x10 = - 0.0001 x3 - 0.0000 x4 - 0.0000 x5 - 1.0001
+              => ( Up to rounding ) -1 <= x10 <= -1.
+              x10.lb = -1
+              x10.ub = -1
+              x10 range: [ -1, -1 ]
+            */
+            List<Tightening> expectedBounds( { Tightening( 3, 2, Tightening::LB ),
+                                               Tightening( 3, 2, Tightening::UB ),
+                                               Tightening( 4, 3, Tightening::LB ),
+                                               Tightening( 4, 3, Tightening::UB ),
+                                               Tightening( 5, 0, Tightening::LB ),
+                                               Tightening( 5, 0, Tightening::UB ),
+                                               Tightening( 6, 0.2595, Tightening::LB ),
+                                               Tightening( 6, 0.2595, Tightening::UB ),
+                                               Tightening( 7, 0.7054, Tightening::LB ),
+                                               Tightening( 7, 0.7054, Tightening::UB ),
+                                               Tightening( 8, 0.0351, Tightening::LB ),
+                                               Tightening( 8, 0.0351, Tightening::UB ),
+                                               Tightening( 9, 1, Tightening::LB ),
+                                               Tightening( 9, 1, Tightening::UB ),
+                                               Tightening( 10, -1, Tightening::LB ),
+                                               Tightening( 10, -1, Tightening::UB )
+
+            } );
+
+            List<Tightening> bounds;
+            TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+            TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+        }
+    }
+
+    void test_deeppoly_softmax6()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkDeepPolySoftmax2( nlr, tableau );
+
+        tableau.setLowerBound( 0, 1 );
+        tableau.setUpperBound( 0, 1.00001 );
+        tableau.setLowerBound( 1, 1 );
+        tableau.setUpperBound( 1, 1.00001 );
+        tableau.setLowerBound( 2, 1 );
+        tableau.setUpperBound( 2, 1.00001 );
+
+        // Invoke DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) );
+
+        /*
+          Input ranges:
+
+          x0: [1, 1.00001]
+          x1: [1, 1.00001]
+          x2: [1, 1.00001]
+
+          Layer 1:
+
+          x3 = x0 - x1 + x2 + 1
+          x3.lb = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ]
+          x3.ub = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ]
+          x3 range: [ 1.999999, 2.000002 ]
+
+          x4 = -x0 + x1 + x2 + 2
+          x4.lb = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ]
+          x4.ub = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ]
+          x4 range: [ 2.999999, 3.000002 ]
+
+          x5 = -x0 - x1 - x2 + 3
+          x5.lb = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ]
+          x5.ub = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ]
+          x5 range: [ -0.000003, 0 ]
+
+          x6 = -x0 - x1 - x2 + 2
+          x6.lb = -x0 - x1 - x2 + 2 : [ -1.000003, -1 ]
+          x6.ub = -x0 - x1 - x2 + 2 : [ -1.000003, -1 ]
+          x6 range: [ -1.000003, -1 ]
+
+          x7 = -x0 - x1 - x2 + 1
+          x7.lb = -x0 - x1 - x2 + 1 : [ -2.000003, -2 ]
+          x7.ub = -x0 - x1 - x2 + 1 : [ -2.000003, -2 ]
+          x7 range: [ -2.000003, -2 ]
+        */
+
+        // First softmax: x8 x10 x12 = softmax( x3, x5, x7 ).
+        unsigned size = nlr.getLayer( 2 )->getActivationSources( 0 ).size();
+        Vector<double> sourceLbs = { 1.999899, -0.000003, -2.000103 };
+        Vector<double> sourceUbs = { 2.000102, 0.0001, -1.999 };
+        Vector<double> sourceMids = { 2.0000005, -0.0000015, -2.0000015 };
+        Vector<double> targetLbs( size, 0 );
+        Vector<double> targetUbs( size, 0 );
+        Vector<double> symbolicLb( size * size, 0 );
+        Vector<double> symbolicUb( size * size, 0 );
+        Vector<double> symbolicLowerBias( size, 0 );
+        Vector<double> symbolicUpperBias( size, 0 );
+        for ( unsigned i = 0; i < size; ++i )
+        {
+            targetLbs[i] = NLR::Layer::linearLowerBound( sourceLbs, sourceUbs, i );
+            targetUbs[i] = NLR::Layer::linearUpperBound( sourceLbs, sourceUbs, i );
+        }
+        for ( unsigned i = 0; i < size; ++i )
+        {
+            symbolicLowerBias[i] =
+                NLR::Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, i ); // Using lse2
+            symbolicUpperBias[i] = NLR::Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, i );
+            for ( unsigned j = 0; j < size; ++j )
+            {
+                symbolicLb[size * j + i] =
+                    NLR::Layer::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, i, j );
+                symbolicUb[size * j + i] =
+                    NLR::Layer::dLSEUpperbound( sourceMids, targetLbs, targetUbs, i, j );
+                symbolicLowerBias[i] -= symbolicLb[size * j + i] * sourceMids[j];
+                symbolicUpperBias[i] -= symbolicUb[size * j + i] * sourceMids[j];
+            }
+        }
+        TS_ASSERT( compareVectors( targetLbs, Vector<double>( { 0.8668, 0.1173, 0.0159 } ) ) );
+        TS_ASSERT( compareVectors( targetUbs, Vector<double>( { 0.8668, 0.1173, 0.0159 } ) ) );
+        TS_ASSERT( compareVectors( symbolicLb,
+                                   Vector<double>( { 0.1155,
+                                                     -0.1017,
+                                                     -0.0138,
+                                                     -0.1017,
+                                                     0.1035,
+                                                     -0.0019,
+                                                     -0.0138,
+                                                     -0.0019,
+                                                     0.0156 } ) ) );
+        TS_ASSERT( compareVectors( symbolicUb,
+                                   Vector<double>( { 0.1154,
+                                                     -0.1017,
+                                                     -0.0138,
+                                                     -0.1017,
+                                                     0.1036,
+                                                     -0.0019,
+                                                     -0.0138,
+                                                     -0.0019,
+                                                     0.0156 } ) ) );
+        TS_ASSERT(
+            compareVectors( symbolicLowerBias, Vector<double>( { 0.6084, 0.3170, 0.0747 } ) ) );
+        TS_ASSERT(
+            compareVectors( symbolicUpperBias, Vector<double>( { 0.6084, 0.3170, 0.0747 } ) ) );
+
+        // Second softmax: x9 x11 = softmax( x4, x6 ).
+ size = nlr.getLayer( 2 )->getActivationSources( 1 ).size();
+ sourceLbs = Vector<double>( { 2.999899, -1.000103 } );
+ sourceUbs = Vector<double>( { 3.000102, -0.9999 } );
+ sourceMids = Vector<double>( { 3.0000005, -1.0000015 } );
+ targetLbs = Vector<double>( size, 0 );
+ targetUbs = Vector<double>( size, 0 );
+ symbolicLb = Vector<double>( size * size, 0 );
+ symbolicUb = Vector<double>( size * size, 0 );
+ symbolicLowerBias = Vector<double>( size, 0 );
+ symbolicUpperBias = Vector<double>( size, 0 );
+ for ( unsigned i = 0; i < size; ++i )
+ {
+ targetLbs[i] = NLR::Layer::linearLowerBound( sourceLbs, sourceUbs, i );
+ targetUbs[i] = NLR::Layer::linearUpperBound( sourceLbs, sourceUbs, i );
+ }
+ for ( unsigned i = 0; i < size; ++i )
+ {
+ symbolicLowerBias[i] =
+ NLR::Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, i ); // Using lse2
+ symbolicUpperBias[i] = NLR::Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, i );
+ for ( unsigned j = 0; j < size; ++j )
+ {
+ symbolicLb[size * j + i] =
+ NLR::Layer::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, i, j );
+ symbolicUb[size * j + i] =
+ NLR::Layer::dLSEUpperbound( sourceMids, targetLbs, targetUbs, i, j );
+ symbolicLowerBias[i] -= symbolicLb[size * j + i] * sourceMids[j];
+ symbolicUpperBias[i] -= symbolicUb[size * j + i] * sourceMids[j];
+ }
+ }
+ TS_ASSERT( compareVectors( targetLbs, Vector<double>( { 0.9820, 0.0180 } ) ) );
+ TS_ASSERT( compareVectors( targetUbs, Vector<double>( { 0.9820, 0.0180 } ) ) );
+ TS_ASSERT(
+ compareVectors( symbolicLb, Vector<double>( { 0.0177, -0.0177, -0.0177, 0.0177 } ) ) );
+ TS_ASSERT(
+ compareVectors( symbolicUb, Vector<double>( { 0.0177, -0.0177, -0.0177, 0.0177 } ) ) );
+ TS_ASSERT( compareVectors( symbolicLowerBias, Vector<double>( { 0.9114, 0.0886 } ) ) );
+ TS_ASSERT( compareVectors( symbolicUpperBias, Vector<double>( { 0.9114, 0.0886 } ) ) );
+
+ /*
+ Layer 2:
+
+ First softmax: x8 x10 x12 = softmax( x3, x5, x7 ).
+0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 <= x8 <= 0.1154 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084
+ x8.lb = 0.2310 x0 + 0.0001 x1 + 0.2310 x2 + 0.4051
+ x8.ub = 0.2310 x0 + 0.0000 x1 + 0.2310 x2 + 0.4050
+ x8 range: [ 0.8668, 0.8668 ]
+
+-0.1017 x3 + 0.1035 x5 - 0.0019 x7 + 0.3170 <= x10 <= -0.1017 x3 + 0.1036 x5 - 0.0019 x7 + 0.3170
+ x10.lb = -0.2033 x0 + 0.0001 x1 - 0.2033 x2 + 0.5239
+ x10.ub = -0.2033 x0 + 0.0000 x1 - 0.2033 x2 + 0.5241
+ x10 range: [ 0.1173, 0.1173 ]
+
+-0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 <= x12 <= -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747
+ x12.lb = -0.0275 x0 + 0.0001 x1 - 0.0275 x2 + 0.0708
+ x12.ub = -0.0275 x0 + 0.0001 x1 - 0.0275 x2 + 0.0708
+ x12 range: [ 0.0159, 0.0159 ]
+
+ Second softmax: x9 x11 = softmax( x4, x6 ).
+0.0177 x4 - 0.0177 x6 + 0.9114 <= x9 <= 0.0177 x4 - 0.0177 x6 + 0.9114
+ x9.lb = 0 x0 + 0.0354 x1 + 0.0354 x2 + 0.9114
+ x9.ub = 0 x0 + 0.0354 x1 + 0.0354 x2 + 0.9114
+ x9 range: [ 0.9820, 0.9820 ]
+
+-0.0177 x4 + 0.0177 x6 + 0.0886 <= x11 <= -0.0177 x4 + 0.0177 x6 + 0.0886
+ x11.lb = 0 x0 - 0.0354 x1 - 0.0354 x2 + 0.0886
+ x11.ub = 0 x0 - 0.0354 x1 - 0.0354 x2 + 0.0886
+ x11 range: [ 0.0180, 0.0180 ]
+
+ Layer 3:
+
+ x13 = x8 + x10 + x12
+ => x13 = ( 0.1155 - 0.1017 - 0.0138 ) x3 + ( -0.1017 + 0.1035 - 0.0019 ) x5
+ + ( -0.0138 - 0.0019 + 0.0156 ) x7 + ( 0.6084 + 0.3170 + 0.0747 )
+
+ => x13 = 0 x3 - 0.0001 x5 - 0.0001 x7 + 1.0001
+ => ( Up to rounding ) 1 <= x13 <= 1.
+ x13.lb = 1
+ x13.ub = 1
+ x13 range: [ 1, 1 ]
+
+ x14 = - x8 - x10 - x12
+ => x14 = - ( 0.1155 - 0.1017 - 0.0138 ) x3 - ( -0.1017 + 0.1035 - 0.0019 ) x5
+ - ( -0.0138 - 0.0019 + 0.0156 ) x7 - ( 0.6084 + 0.3170 + 0.0747 )
+
+ => x14 = 0 x3 + 0.0001 x5 + 0.0001 x7 - 1.0001
+ => ( Up to rounding ) -1 <= x14 <= -1.
+ x14.lb = -1
+ x14.ub = -1
+ x14 range: [ -1, -1 ]
+
+ x15 = x9 + x11
+ => x15 = ( 0.0177 - 0.0177 ) x4 + ( -0.0177 + 0.0177 ) x6 + ( 0.9114 + 0.0886 )
+
+ => x15 = 0 x4 + 0 x6 + 1
+ => ( Up to rounding ) 1 <= x15 <= 1.
+ x15.lb = 1
+ x15.ub = 1
+ x15 range: [ 1, 1 ]
+
+ x16 = - x9 - x11
+ => x16 = - ( 0.0177 - 0.0177 ) x4 - ( -0.0177 + 0.0177 ) x6 - ( 0.9114 + 0.0886 )
+
+ => x16 = 0 x4 + 0 x6 - 1
+ => ( Up to rounding ) -1 <= x16 <= -1.
+ x16.lb = -1
+ x16.ub = -1
+ x16 range: [ -1, -1 ]
+ */
+
+ List<Tightening> expectedBounds( {
+ Tightening( 3, 2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+ Tightening( 4, 3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
+ Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0, Tightening::UB ),
+ Tightening( 6, -1, Tightening::LB ), Tightening( 6, -1, Tightening::UB ),
+ Tightening( 7, -2, Tightening::LB ), Tightening( 7, -2, Tightening::UB ),
+ Tightening( 8, 0.86681, Tightening::LB ), Tightening( 8, 0.86682, Tightening::UB ),
+ Tightening( 9, 0.98201, Tightening::LB ), Tightening( 9, 0.98201, Tightening::UB ),
+ Tightening( 10, 0.11731, Tightening::LB ), Tightening( 10, 0.11731, Tightening::UB ),
+ Tightening( 11, 0.017985, Tightening::LB ), Tightening( 11, 0.017986, Tightening::UB ),
+ Tightening( 12, 0.015875, Tightening::LB ), Tightening( 12, 0.015876, Tightening::UB ),
+ Tightening( 13, 1, Tightening::LB ), Tightening( 13, 1, Tightening::UB ),
+ Tightening( 14, -1, Tightening::LB ), Tightening( 14, -1, Tightening::UB ),
+ Tightening( 15, 1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ),
+ Tightening( 16, -1, Tightening::LB ), Tightening( 16, -1, Tightening::UB ),
+ } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+ }
+
+ void test_deeppoly_bilinear2()
+ {
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkDeepPolyBilinear( nlr, tableau );
+
+ tableau.setLowerBound( 0, 1 );
+ tableau.setUpperBound( 0, 2 );
+ tableau.setLowerBound( 1, -2 );
+ tableau.setUpperBound( 1, 1 );
+
+ // Invoke DeepPoly
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false ) );
+
+ /*
+ Input ranges:
+
+ x0: [1, 2]
+ x1: [-2, 1]
+
+ Layers 1, 2:
+
+ x2 = x0 - 2x1
+ x2.lb = x0 - 2x1 : [-1, 6]
+ x2.ub = x0 - 2x1 : [-1, 6]
+
+ x3 = x0 + x1
+ x3.lb = x0 + x1 : [-1, 3]
+ x3.ub = x0 + x1 : [-1, 3]
+
+ Coefficients for bilinear layer:
+ Lower bound:
+ alpha_l = x3.lb = -1
+ beta = x2.lb = -1
+ gamma_l = -x2.lb x3.lb = --1 * -1 = -1
+
+ Upper bound:
+ alpha_u = x3.ub = 3
+ beta = x2.lb = -1
+ gamma_u = -x2.lb x3.ub = --1 * 3 = 3
+
+ -x2 - x3 - 1 <= x4 <= 3x2 - x3 + 3
+ x4.lb = -1 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + -1 = -2x0 + x1 - 1 : [-7, -2]
+ x4.ub = 3 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + 3 = 2x0 - 7x1 + 3 : [-2, 21]
+ x4 range: [-6, 18]
+
+ Layer 3:
+
+ x5 = -x4
+ => -3x2 + x3 - 3 <= x5 <= x2 + x3 + 1
+ x5.lb = -1 ( 2x0 - 7x1 + 3 ) = -2x0 + 7x1 - 3 : [-21, 2]
+ x5.ub = -1 ( -2x0 + x1 - 1 ) = 2x0 - x1 + 1 : [2, 7]
+ x5 range: [-18, 6]
+ */
+
+ List<Tightening> expectedBounds( { Tightening( 2, -1, Tightening::LB ),
+ Tightening( 2, 6, Tightening::UB ),
+ Tightening( 3, -1, Tightening::LB ),
+ Tightening( 3, 3, Tightening::UB ),
+ Tightening( 4, -6, Tightening::LB ),
+ Tightening( 4, 18, Tightening::UB ),
+ Tightening( 5, -18, Tightening::LB ),
+ Tightening( 5, 6, Tightening::UB ) } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+ }
+
+ void test_parameterised_deeppoly_relus_all_active()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkDeepPolyRelu( nlr, tableau );
+
+ tableau.setLowerBound( 0, 4 );
+ tableau.setUpperBound( 0, 6 );
+ tableau.setLowerBound( 1, 1 );
+ tableau.setUpperBound( 1, 5 );
+
+ unsigned paramCount = nlr.getNumberOfParameters();
+ Vector<double> coeffs( paramCount, 0.5 );
+
+ // Invoke Parameterised DeepPoly
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) );
+
+ /*
+ Input ranges:
+
+ x0: [4, 6]
+ x1: [1, 5]
+
+ Layers 1, 2:
+
+ x2 = 2x0 + 3x1
+ x2.lb = 2x0 + 3x1 : [11, 27]
+ x2.ub = 2x0 + 3x1 : [11, 27]
+
+ x3 = x0 + x1
+ x3.lb = x0 + x1 : [5, 11]
+ x3.ub = x0 + x1 : [5, 11]
+
+ Both ReLUs active, bounds survive through the activations:
+
+ x2 <= x4 <= x2
+ x4.lb = 2x0 + 3x1 : [11, 27]
+ x4.ub = 2x0 + 3x1 : [11, 27]
+
+ x3 <= x5 <= x3
+ x5.lb = x0 + x1 : [5, 11]
+ x5.ub = x0 + x1 : [5, 11]
+
+ Layer 3:
+
+ x6 = x4 - x5
+ => x2 - x3 <= x6 <= x2 - x3
+ x6.lb = x0 + 2x1 : [6, 16]
+ x6.ub = x0 + 2x1 : [6, 16]
+ */
+
+ List<Tightening> expectedBounds( {
+ Tightening( 2, 11, Tightening::LB ),
+ Tightening( 2, 27, Tightening::UB ),
+ Tightening( 3, 5, Tightening::LB ),
+ Tightening( 3, 11, Tightening::UB ),
+
+ Tightening( 4, 11, Tightening::LB ),
+ Tightening( 4, 27, Tightening::UB ),
+ Tightening( 5, 5, Tightening::LB ),
+ Tightening( 5, 11, Tightening::UB ),
+
+ Tightening( 6, 6, Tightening::LB ),
+ Tightening( 6, 16, Tightening::UB ),
+ } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+ }
+
+ void test_parameterised_deeppoly_relus_active_and_inactive()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkDeepPolyRelu( nlr, tableau );
+
+ tableau.setLowerBound( 0, 4 );
+ tableau.setUpperBound( 0, 6 );
+ tableau.setLowerBound( 1, 1 );
+ tableau.setUpperBound( 1, 5 );
+
+ // Strong negative bias for x2, which is node (1,0)
+ nlr.setBias( 1, 0, -30 );
+
+ unsigned paramCount = nlr.getNumberOfParameters();
+ Vector<double> coeffs( paramCount, 0.5 );
+
+ // Invoke Parameterised DeepPoly
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) );
+
+ /*
+ Input ranges:
+
+ x0: [4, 6]
+ x1: [1, 5]
+
+ Layers 1, 2:
+
+ x2 = 2x0 + 3x1 - 30
+ x2.lb = 2x0 + 3x1 - 30 : [-19, -3]
+ x2.ub = 2x0 + 3x1 - 30 : [-19, -3]
+
+ x3 = x0 + x1
+ x3.lb = x0 + x1 : [5, 11]
+ x3.ub = x0 + x1 : [5, 11]
+
+ First ReLU is inactive, bounds get zeroed
+ Second ReLU is active, bounds survive the activation
+
+ 0 <= x4 <= 0
+ x4.lb = 0
+ x4.ub = 0
+
+ x3 <= x5 <= x3
+ x5.lb = x0 + x1 : [5, 11]
+ x5.ub = x0 + x1 : [5, 11]
+
+ Layer 3:
+
+ x6 = x4 - x5
+ ==> -x3 <= x6 <= -x3
+ x6.lb = -x0 - x1 : [-11, -5]
+ x6.ub = -x0 - x1 : [-11, -5]
+ */
+
+ List<Tightening> expectedBounds( {
+ Tightening( 2, -19, Tightening::LB ),
+ Tightening( 2, -3, Tightening::UB ),
+ Tightening( 3, 5, Tightening::LB ),
+ Tightening( 3, 11, Tightening::UB ),
+
+ Tightening( 4, 0, Tightening::LB ),
+ Tightening( 4, 0, Tightening::UB ),
+ Tightening( 5, 5, Tightening::LB ),
+ Tightening( 5, 11, Tightening::UB ),
+
+ Tightening( 6, -11, Tightening::LB ),
+ Tightening( 6, -5, Tightening::UB ),
+ } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+ }
+
+ void test_parameterised_deeppoly_relus_active_and_not_fixed()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkDeepPolyRelu( nlr, tableau );
+
+ tableau.setLowerBound( 0, 4 );
+ tableau.setUpperBound( 0, 6 );
+ tableau.setLowerBound( 1, 1 );
+ tableau.setUpperBound( 1, 5 );
+
+ // Strong negative bias for x2, which is node (1,0)
+ nlr.setBias( 1, 0, -15 );
+
+ unsigned paramCount = nlr.getNumberOfParameters();
+ Vector<double> coeffs( paramCount, 0.5 );
+
+ // Invoke Parameterised DeepPoly
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) );
+
+ /*
+ Input ranges:
+
+ x0: [4, 6]
+ x1: [1, 5]
+
+ Layers 1, 2:
+
+ x2 = 2x0 + 3x1 - 15
+ x2.lb = 2x0 + 3x1 - 15 : [-4, 12]
+ x2.ub = 2x0 + 3x1 - 15 : [-4, 12]
+
+ x3 = x0 + x1
+ x3.lb = x0 + x1 : [5, 11]
+ x3.ub = x0 + x1 : [5, 11]
+
+ First ReLU is undecided, bounds are concretized. Using custom ReLU lower
+ coefficient of 0.5. Upper coefficient: 12/(12--4) = 12/16 = 0.75
+ Second ReLU is active, bounds survive the activation
+
+ x4 range: [-2, 12]
+ 0.5 x2 <= x4 <= 0.75 x2 + 3
+ x4.lb = 0.5 ( 2x0 + 3x1 - 15 ) = x0 + 1.5 x1 - 7.5
+ x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25
+
+ x3 <= x5 <= x3
+ x5.lb = x0 + x1 : [5, 11]
+ x5.ub = x0 + x1 : [5, 11]
+
+ Layer 3:
+
+ x6 = x4 - x5
+ ==> 0.5 x2 - x3 <= x6 <= 0.75x2 - x3 + 3
+ x6.lb = 0.5 x1 - 7.5
+ x6.ub = 0.5x0 + 1.25x1 - 8.25
+
+ x6 range: [0.5 - 7.5 = -7, 3 + 6.25 - 8.25 = 1] = [-7, 1]
+ */
+
+ List<Tightening> expectedBounds( {
+ Tightening( 2, -4, Tightening::LB ),
+ Tightening( 2, 12, Tightening::UB ),
+ Tightening( 3, 5, Tightening::LB ),
+ Tightening( 3, 11, Tightening::UB ),
+
+ Tightening( 4, -2, Tightening::LB ),
+ Tightening( 4, 12, Tightening::UB ),
+ Tightening( 5, 5, Tightening::LB ),
+ Tightening( 5, 11, Tightening::UB ),
+
+ Tightening( 6, -7, Tightening::LB ),
+ Tightening( 6, 1, Tightening::UB ),
+ } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+ }
+
+ void test_parameterised_deeppoly_relus_active_and_externally_fixed()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkDeepPolyRelu( nlr, tableau );
+
+ tableau.setLowerBound( 0, 4 );
+ tableau.setUpperBound( 0, 6 );
+ tableau.setLowerBound( 1, 1 );
+ tableau.setUpperBound( 1, 5 );
+
+ // Strong negative bias for x2, which is node (1,0). Should make the node unfixed.
+ nlr.setBias( 1, 0, -15 );
+
+ // However, one of the ReLU's variables has been eliminated
+ nlr.eliminateVariable( 2, -3 );
+
+ unsigned paramCount = nlr.getNumberOfParameters();
+ Vector<double> coeffs( paramCount, 0.5 );
+
+ // Invoke Parameterised DeepPoly
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) );
+
+ /*
+ Input ranges:
+
+ x0: [4, 6]
+ x1: [1, 5]
+
+ Layers 1, 2:
+
+ x2 = -3
+ x2 is eliminated, everything set to -3
+
+ x3 = x0 + x1
+ x3.lb = x0 + x1 : [5, 11]
+ x3.ub = x0 + x1 : [5, 11]
+
+ First ReLU is inactive (set externally), bounds get zeroed
+ Second ReLU is active, bounds survive the activation
+
+ 0 <= x4 <= 0
+ x4.lb = 0
+ x4.ub = 0
+
+ x3 <= x5 <= x3
+ x5.lb = x0 + x1 : [5, 11]
+ x5.ub = x0 + x1 : [5, 11]
+
+ Layer 3:
+
+ x6 = x4 - x5
+ ==> -x3 <= x6 <= -x3
+ x6.lb = - x0 - x1 : [-11, -5]
+ x6.ub = - x0 - x1 : [-11, -5]
+ */
+
+ List<Tightening> expectedBounds( {
+ // x2 does not appear, because it has been eliminated
+
+ Tightening( 3, 5, Tightening::LB ),
+ Tightening( 3, 11, Tightening::UB ),
+
+ Tightening( 4, 0, Tightening::LB ),
+ Tightening( 4, 0, Tightening::UB ),
+ Tightening( 5, 5, Tightening::LB ),
+ Tightening( 5, 11, Tightening::UB ),
+
+ Tightening( 6, -11, Tightening::LB ),
+ Tightening( 6, -5, Tightening::UB ),
+ } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+ }
+
+ void test_parameterised_deeppoly_relu_residual1()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkDeepPolyReluResidual1( nlr, tableau );
+
+ tableau.setLowerBound( 0, -1 );
+ tableau.setUpperBound( 0, 1 );
+
+ unsigned paramCount = nlr.getNumberOfParameters();
+ Vector<double> coeffs( paramCount, 0.5 );
+
+ // Invoke Parameterised DeepPoly
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) );
+
+ /*
+ Input ranges:
+
+ x0: [-1, 1]
+
+ Layers 1, 2:
+
+ x1 = x0
+ x1.lb = x0 : [-1, 1]
+ x1.ub = x0 : [-1, 1]
+
+ ReLU is undecided, bounds are concretized. Using custom ReLU lower
+ coefficient of 0.5. Upper coefficient: 1/( 1--1 ) = 1/2 = 0.5
+
+ 0.5 x1 <= x2 <= 0.5x1 + 0.5
+ x2.lb = 0.5 x0
+ x2.ub = 0.5 x0 + 0.5
+ x2 range: [-0.5, 1]
+
+ Layers 3, 4 (with residual from x0):
+
+ x3 = - x2 - x0 + 1
+ x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5 x0 + 0.5 : [-1, 2]
+ x3.ub = -1( 0.5 x0 ) -1x0 + 1 = -1.5 x0 + 1 : [-0.5, 2.5]
+ x3 range: [-1, 2.5]
+
+ ReLU is undecided, bounds are concretized. Using custom ReLU lower
+ coefficient of 0.5. Upper coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7.
+
+ 0.5 x3 <= x4 <= 5/7 x3 + 5/7
+ x4.lb = 0.5 ( -1.5 x0 + 0.5 ) = -0.75 x0 + 0.25 : [-0.5, 1]
+ x4.ub = 5/7 ( -1.5 x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5]
+ x4 range: [-0.5, 2.5]
+
+ Layer 5 (with residual from x1):
+
+ x5 = 3x4 + 3x1 + 1
+ x5.lb = 3 ( -0.75 x0 + 0.25 ) + 3 ( x0 ) + 1 = 0.75x0 + 1.75 : [1, 2.5]
+ x5.ub = 3 ( -15/14 x0 + 20/14 ) + 3 ( x0 ) + 1 = -3/14 x0 + 74/14 : [71/14, 77/14 = 5.5]
+ x5 range: [1, 5.5]
+ */
+
+ List<Tightening> expectedBounds( {
+ Tightening( 1, -1, Tightening::LB ),
+ Tightening( 1, 1, Tightening::UB ),
+ Tightening( 2, -0.5, Tightening::LB ),
+ Tightening( 2, 1, Tightening::UB ),
+ Tightening( 3, -1, Tightening::LB ),
+ Tightening( 3, 2.5, Tightening::UB ),
+ Tightening( 4, -0.5, Tightening::LB ),
+ Tightening( 4, 2.5, Tightening::UB ),
+ Tightening( 5, 1, Tightening::LB ),
+ Tightening( 5, 5.5, Tightening::UB ),
+ } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+ }
+
+ void test_parameterised_deeppoly_relu_residual2()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkDeepPolyReluResidual2( nlr, tableau );
+
+ tableau.setLowerBound( 0, -1 );
+ tableau.setUpperBound( 0, 1 );
+
+ unsigned paramCount = nlr.getNumberOfParameters();
+ Vector<double> coeffs( paramCount, 0.5 );
+
+ // Invoke Parameterised DeepPoly
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) );
+
+ /*
+ Input ranges:
+
+ x0: [-1, 1]
+
+ Layers 1, 2:
+
+ x1 = x0
+ x1.lb = x0 : [-1, 1]
+ x1.ub = x0 : [-1, 1]
+
+ ReLU is undecided, bounds are concretized. Using custom ReLU lower
+ coefficient of 0.5. Upper coefficient: 1/( 1--1 ) = 1/2 = 0.5
+
+ 0.5 x1 <= x2 <= 0.5x1 + 0.5
+ x2.lb = 0.5x0
+ x2.ub = 0.5x0 + 0.5
+ x2 range: [-0.5, 1]
+
+ Layers 3, 4 (with residual from x0):
+
+ x3 = - x2 - x0 + 1
+ x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 2]
+ x3.ub = -1( 0.5 x0 ) -1x0 + 1 = -1.5 x0 + 1 : [-0.5, 2.5]
+ x3 range: [-1, 2.5]
+
+ ReLU is undecided, bounds are concretized. Using custom ReLU lower
+ coefficient of 0.5. Upper coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7.
+
+ 0.5 x3 <= x4 <= 5/7 x3 + 5/7
+ x4.lb = 0.5 ( -1.5 x0 + 0.5 ) = -0.75 x0 + 0.25 : [-0.5, 1]
+ x4.ub = 5/7 ( -1.5 x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5]
+ x4 range: [-0.5, 2.5]
+
+ Layer 5 (with residual from x0):
+
+ x5 = 3x4 + x0 + 1
+ x5.lb = 3 ( -0.75 x0 + 0.25 ) + ( x0 ) + 1 = -1.25x0 + 1.75 : [0.5, 3]
+ x5.ub = 3 ( -15/14 x0 + 20/14 ) + ( x0 ) + 1 = -31/14 x0 + 74/14 : [43/14, 105/14 = 7.5]
+ x5 range: [0.5, 7.5]
+
+ Layer 6:
+ x6 = x5
+ x6.lb = -1.25x0 + 1.75 : [0.5, 3]
+ x6.ub = -31/14 x0 + 74/14 : [43/14, 7.5]
+ x6 range: [0.5, 7.5]
+ */
+
+ List<Tightening> expectedBounds( {
+ Tightening( 1, -1, Tightening::LB ),
+ Tightening( 1, 1, Tightening::UB ),
+ Tightening( 2, -0.5, Tightening::LB ),
+ Tightening( 2, 1, Tightening::UB ),
+ Tightening( 3, -1, Tightening::LB ),
+ Tightening( 3, 2.5, Tightening::UB ),
+ Tightening( 4, -0.5, Tightening::LB ),
+ Tightening( 4, 2.5, Tightening::UB ),
+ Tightening( 5, 0.5, Tightening::LB ),
+ Tightening( 5, 7.5, Tightening::UB ),
+ Tightening( 6, 0.5, Tightening::LB ),
+ Tightening( 6, 7.5, Tightening::UB ),
+ } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+ }
+
+ void test_parameterised_deeppoly_relu_reindex()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkDeepPolyReluReindex( nlr, tableau );
+
+ tableau.setLowerBound( 0, -1 );
+ tableau.setUpperBound( 0, 1 );
+ tableau.setLowerBound( 1, -1 );
+ tableau.setUpperBound( 1, 1 );
+
+ unsigned paramCount = nlr.getNumberOfParameters();
+ Vector<double> coeffs( paramCount, 0.5 );
+
+ // Invoke Parameterised DeepPoly
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) );
+
+ /*
+ Input ranges:
+
+ x0: [-1, 1]
+ x1: [-1, 1]
+
+ Layers 1, 2:
+
+ x2 = x0 + x1
+ x2.lb = x0 + x1 : [-2, 2]
+ x2.ub = x0 + x1 : [-2, 2]
+
+ x3 = x0 - x1
+ x3.lb = x0 - x1 : [-2, 2]
+ x3.ub = x0 - x1 : [-2, 2]
+
+ Both ReLUs are undecided, bounds are concretized. Using custom ReLU lower
+ coefficient of 0.5. Upper coefficient: 2/( 2--2 ) = 2/4 = 0.5
+
+ 0.5 x2 <= x4 <= 0.5x2 + 1
+ x4.lb = 0.5 ( x0 + x1 ) = 0.5x0 + 0.5x1
+ x4.ub = 0.5 ( x0 + x1 ) + 1 = 0.5x0 + 0.5x1 + 1
+ x4 range: [-1, 2]
+
+ 0.5 x3 <= x5 <= 0.5x3 + 1
+ x5.lb = 0.5 ( x0 - x1 ) = 0.5x0 - 0.5x1
+ x5.ub = 0.5 ( x0 - x1 ) + 1 = 0.5x0 - 0.5x1 + 1
+ x5 range: [-1, 2]
+
+ Layers 3, 4:
+
+ x6 = x4 + x5
+ x6.lb = 1 ( 0.5x0 + 0.5x1 ) + 1 ( 0.5x0 - 0.5x1 ) = x0 : [-1, 1]
+ x6.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) + 1 ( 0.5x0 - 0.5x1 + 1 ) = x0 + 2 : [1, 3]
+ x6 range: [-1, 3]
+
+ x7 = x4 - x5
+ x7.lb = 1 ( 0.5x0 + 0.5x1 ) - 1 ( 0.5x0 - 0.5x1 + 1 ) = x1 - 1 : [-2, 0]
+ x7.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) - 1 ( 0.5x0 - 0.5x1 ) = x1 + 1 : [0, 2]
+ x7 range: [-2, 2]
+
+ Both ReLUs are undecided, bounds are concretized. Using custom ReLU lower
+ coefficient of 0.5.
+ Upper coefficient (first ReLU): 3/( 3--1 ) = 3/4 = 0.75
+ Upper coefficient (second ReLU): 2/( 2--2 ) = 2/4 = 0.5
+
+ 0.5 x6 <= x8 <= 0.75 x6 + 0.75
+ x8.lb = 0.5 ( x0 ) = 0.5 x0
+ x8.ub = 0.75 ( x0 + 2 ) + 0.75 = 0.75 x0 + 2.25
+ x8 range: [-0.5, 3]
+
+ 0.5 x7 <= x9 <= 0.5 x7 + 1
+ x9.lb = 0.5 ( x1 - 1 ) = 0.5 x1 - 0.5
+ x9.ub = 0.5 ( x1 + 1 ) + 1 = 0.5x1 + 1.5
+ x9 range: [-1, 2]
+
+ Layer 5:
+ x10 = x8 + x9 + 1
+ x10.lb = 1 ( 0.5 x6 ) + 1 ( 0.5 x7 ) + 1 = ( 0.5 x4 + 0.5x5 ) + 1 ( 0.5 x4 - 0.5x5 ) + 1
+ = x4 + 1 >= 0.5 x2 + 1 = 0.5 x0 + 0.5x1 + 1 : [0, 2]
+ x10.ub = 1 ( 0.75 x6 + 0.75 ) + 1 ( 0.5 x7 + 1 ) + 1
+ = ( 0.75 x4 + 0.75 x5 + 0.75 ) + 1 ( 0.5 x4 - 0.5x5 + 1 ) + 1
+ = 1.25 x4 + 0.25 x5 + 2.75 <= 0.625 x2 + 0.125 x3 + 4.25
+ = 0.75 x0 + 0.5 x1 + 4.25 : [3, 5.5]
+ x10 range: [0, 5.5]
+
+ x11 = x9
+ x11.lb = 0.5 x1 - 0.5 : [-1, 0]
+ x11.ub = 0.5x1 + 1.5 : [1, 2]
+ x11 range: [-1, 2]
+
+ */
+
+ List<Tightening> expectedBounds(
+ { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+ Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+
+ Tightening( 4, -1, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
+ Tightening( 5, -1, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+
+ Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ),
+ Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
+
+ Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
+ Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2, Tightening::UB ),
+
+ Tightening( 10, 0, Tightening::LB ), Tightening( 10, 5.5, Tightening::UB ),
+ Tightening( 11, -1, Tightening::LB ), Tightening( 11, 2, Tightening::UB )
+
+ } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+ }
+
+ void test_parameterised_deeppoly_abs_all_positive()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ tableau.getBoundManager().initialize( 7 );
+ nlr.setTableau( &tableau );
+
+ // Create the layers
+ nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+ nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
+ nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 );
+ nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );
+
+ // Mark layer dependencies
+ for ( unsigned i = 1; i <= 3; ++i )
+ nlr.addLayerDependency( i - 1, i );
+
+ // Weights
+ nlr.setWeight( 0, 0, 1, 0, 2 );
+ nlr.setWeight( 0, 0, 1, 1, 1 );
+ nlr.setWeight( 0, 1, 1, 0, 3 );
+ nlr.setWeight( 0, 1, 1, 1, 1 );
+ nlr.setWeight( 2, 0, 3, 0, 1 );
+ nlr.setWeight( 2, 1, 3, 0, -1 );
+
+ // Mark the Abs sources
+ nlr.addActivationSource( 1, 0, 2, 0 );
+ nlr.addActivationSource( 1, 1, 2, 1 );
+
+ // Variable indexing
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
+
+ // Very loose bounds for neurons except inputs
+ double large = 1000000;
+
+ tableau.setLowerBound( 2, -large );
+ tableau.setUpperBound( 2, large );
+ tableau.setLowerBound( 3, -large );
+ tableau.setUpperBound( 3, large );
+ tableau.setLowerBound( 4, -large );
+ tableau.setUpperBound( 4, large );
+ tableau.setLowerBound( 5, -large );
+ tableau.setUpperBound( 5, large );
+ tableau.setLowerBound( 6, -large );
+ tableau.setUpperBound( 6, large );
+
+ tableau.setLowerBound( 0, 4 );
+ tableau.setUpperBound( 0, 6 );
+ tableau.setLowerBound( 1, 1 );
+ tableau.setUpperBound( 1, 5 );
+
+ unsigned paramCount = nlr.getNumberOfParameters();
+ Vector<double> coeffs( paramCount, 0.5 );
+
+ // Invoke Parameterised DeepPoly
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) );
+
+ /*
+ Input ranges:
+
+ x0: [4, 6]
+ x1: [1, 5]
+
+ Layers 1, 2:
+
+ x2 = 2x0 + 3x1
+ x2.lb = 2x0 + 3x1 : [11, 27]
+ x2.ub = 2x0 + 3x1 : [11, 27]
+
+ x3 = x0 + x1
+ x3.lb = x0 + x1 : [5, 11]
+ x3.ub = x0 + x1 : [5, 11]
+
+ Both absolute values positive, bounds survive through the activations:
+
+ x2 <= x4 <= x2
+ x4.lb = 2x0 + 3x1 : [11, 27]
+ x4.ub = 2x0 + 3x1 : [11, 27]
+
+ x3 <= x5 <= x3
+ x5.lb = x0 + x1 : [5, 11]
+ x5.ub = x0 + x1 : [5, 11]
+
+ Layer 3:
+ x6 = x4 - x5
+ => x2 - x3 <= x6 <= x2 - x3
+ x6.lb = x0 + 2x1 : [6, 16]
+ x6.ub = x0 + 2x1 : [6, 16]
+ */
+
+ List<Tightening> expectedBounds( {
+ Tightening( 2, 11, Tightening::LB ),
+ Tightening( 2, 27, Tightening::UB ),
+ Tightening( 3, 5, Tightening::LB ),
+ Tightening( 3, 11, Tightening::UB ),
+
+ Tightening( 4, 11, Tightening::LB ),
+ Tightening( 4, 27, Tightening::UB ),
+ Tightening( 5, 5, Tightening::LB ),
+ Tightening( 5, 11, Tightening::UB ),
+
+ Tightening( 6, 6, Tightening::LB ),
+ Tightening( 6, 16, Tightening::UB ),
+ } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+ }
+
+ void test_parameterised_deeppoly_abs_positive_and_negative()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ tableau.getBoundManager().initialize( 7 );
+ nlr.setTableau( &tableau );
+
+ // Create the layers
+ nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+ nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
+ nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 );
+ nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );
+
+ // Mark layer dependencies
+ for ( unsigned i = 1; i <= 3; ++i )
+ nlr.addLayerDependency( i - 1, i );
+
+ // Weights
+ nlr.setWeight( 0, 0, 1, 0, 2 );
+ nlr.setWeight( 0, 0, 1, 1, 1 );
+ nlr.setWeight( 0, 1, 1, 0, 3 );
+ nlr.setWeight( 0, 1, 1, 1, 1 );
+ nlr.setWeight( 2, 0, 3, 0, 1 );
+ nlr.setWeight( 2, 1, 3, 0, -1 );
+
+ // Mark the Abs sources
+ nlr.addActivationSource( 1, 0, 2, 0 );
+ nlr.addActivationSource( 1, 1, 2, 1 );
+
+ // Variable indexing
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
+
+ // Very loose bounds for neurons except inputs
+ double large = 1000000;
+
+ tableau.setLowerBound( 2, -large );
+ tableau.setUpperBound( 2, large );
+ tableau.setLowerBound( 3, -large );
+ tableau.setUpperBound( 3, large );
+ tableau.setLowerBound( 4, -large );
+ tableau.setUpperBound( 4, large );
+ tableau.setLowerBound( 5, -large );
+ tableau.setUpperBound( 5, large );
+ tableau.setLowerBound( 6, -large );
+ tableau.setUpperBound( 6, large );
+
+ tableau.setLowerBound( 0, 4 );
+ tableau.setUpperBound( 0, 6 );
+ tableau.setLowerBound( 1, 1 );
+ tableau.setUpperBound( 1, 5 );
+
+ // Strong negative bias for x2, which is node (1,0)
+ nlr.setBias( 1, 0, -30 );
+
+ unsigned paramCount = nlr.getNumberOfParameters();
+ Vector<double> coeffs( paramCount, 0.5 );
+
+ // Invoke Parameterised DeepPoly
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) );
+
+ /*
+ Input ranges:
+
+ x0: [4, 6]
+ x1: [1, 5]
+
+ Layers 1, 2:
+ x2 = 2x0 + 3x1 - 30
+ x2.lb = 2x0 + 3x1 - 30 : [-19, -3]
+ x2.ub = 2x0 + 3x1 - 30 : [-19, -3]
+
+ x3 = x0 + x1
+ x3.lb = x0 + x1 : [5, 11]
+ x3.ub = x0 + x1 : [5, 11]
+
+ First absolute value is negative, bounds get flipped
+ Second absolute value is positive, bounds survive the activation
+
+ -x2 <= x4 <= -x2
+ x4.lb = -2x0 -3x1 + 30 : [3, 19]
+ x4.ub = -2x0 -3x1 + 30 : [3, 19]
+
+ x3 <= x5 <= x3
+ x5.lb = x0 + x1 : [5, 11]
+ x5.ub = x0 + x1 : [5, 11]
+
+ Layer 3:
+ x6 = x4 - x5
+ => -x2 - x3 <= x6 <= -x2 - x3
+ x6.lb = - 3x0 - 4x1 + 30 : [-8, 14]
+ x6.ub = - 3x0 - 4x1 + 30 : [-8, 14]
+ */
+
+ List<Tightening> expectedBounds( {
+ Tightening( 2, -19, Tightening::LB ),
+ Tightening( 2, -3, Tightening::UB ),
+ Tightening( 3, 5, Tightening::LB ),
+ Tightening( 3, 11, Tightening::UB ),
+
+ Tightening( 4, 3, Tightening::LB ),
+ Tightening( 4, 19, Tightening::UB ),
+ Tightening( 5, 5, Tightening::LB ),
+ Tightening( 5, 11, Tightening::UB ),
+
+ Tightening( 6, -8, Tightening::LB ),
+ Tightening( 6, 14, Tightening::UB ),
+ } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+ }
+
+ void test_parameterised_deeppoly_absolute_values_positive_and_not_fixed()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ tableau.getBoundManager().initialize( 7 );
+ nlr.setTableau( &tableau );
+
+ // Create the layers
+ nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+ nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
+ nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 );
+ nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );
+
+ // Mark layer dependencies
+ for ( unsigned i = 1; i <= 3; ++i )
+ nlr.addLayerDependency( i - 1, i );
+
+ // Weights
+ nlr.setWeight( 0, 0, 1, 0, 2 );
+ nlr.setWeight( 0, 0, 1, 1, 1 );
+ nlr.setWeight( 0, 1, 1, 0, 3 );
+ nlr.setWeight( 0, 1, 1, 1, 1 );
+ nlr.setWeight( 2, 0, 3, 0, 1 );
+ nlr.setWeight( 2, 1, 3, 0, -1 );
+
+ // Mark the Abs sources
+ nlr.addActivationSource( 1, 0, 2, 0 );
+ nlr.addActivationSource( 1, 1, 2, 1 );
+
+ // Variable indexing
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
+
+ // Very loose bounds for neurons except inputs
+ double large = 1000000;
+
+ tableau.setLowerBound( 2, -large );
+ tableau.setUpperBound( 2, large );
+ tableau.setLowerBound( 3, -large );
+ tableau.setUpperBound( 3, large );
+ tableau.setLowerBound( 4, -large );
+ tableau.setUpperBound( 4, large );
+ tableau.setLowerBound( 5, -large );
+ tableau.setUpperBound( 5, large );
+ tableau.setLowerBound( 6, -large );
+ tableau.setUpperBound( 6, large );
+
+ tableau.setLowerBound( 0, 4 );
+ tableau.setUpperBound( 0, 6 );
+ tableau.setLowerBound( 1, 1 );
+ tableau.setUpperBound( 1, 5 );
+
+ // Strong negative bias for x2, which is node (1,0)
+ nlr.setBias( 1, 0, -15 );
+
+ unsigned paramCount = nlr.getNumberOfParameters();
+ Vector<double> coeffs( paramCount, 0.5 );
+
+ // Invoke Parameterised DeepPoly
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) );
+
+ /*
+ Input ranges:
+
+ x0: [4, 6]
+ x1: [1, 5]
+
+ Layers 1, 2:
+ x2 = 2x0 + 3x1 - 15
+ x2.lb = 2x0 + 3x1 - 15 : [-4, 12]
+ x2.ub = 2x0 + 3x1 - 15 : [-4, 12]
+
+ x3 = x0 + x1
+ x3.lb = x0 + x1 : [5, 11]
+ x3.ub = x0 + x1 : [5, 11]
+
+ First absolute value is undecided, bounds are concretized.
+ Second absolute value is positive, bounds survive the activation
+
+ 0 <= x4 <= 12
+ x4 range: [0, 12]
+ x4.lb = 0
+ x4.ub = 12
+
+ x3 <= x5 <= x3
+ x5.lb = x0 + x1 : [5, 11]
+ x5.ub = x0 + x1 : [5, 11]
+
+ Layer 3:
+
+ x6 = x4 - x5
+ => -x3 <= x6 <= -x3 + 12
+ x6.lb = - x0 - x1 : [-11, -5]
+ x6.ub = - x0 - x1 + 12 : [ 1, 7]
+
+ x6 range: [-11, 7]
+ */
+
+ List<Tightening> expectedBounds( {
+ Tightening( 2, -4, Tightening::LB ),
+ Tightening( 2, 12, Tightening::UB ),
+ Tightening( 3, 5, Tightening::LB ),
+ Tightening( 3, 11, Tightening::UB ),
+
+ Tightening( 4, 0, Tightening::LB ),
+ Tightening( 4, 12, Tightening::UB ),
+ Tightening( 5, 5, Tightening::LB ),
+ Tightening( 5, 11, Tightening::UB ),
+
+ Tightening( 6, -11, Tightening::LB ),
+ Tightening( 6, 7, Tightening::UB ),
+ } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+ }
+
+ void test_parameterised_deeppoly_absolute_values_active_and_externally_fixed()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ tableau.getBoundManager().initialize( 7 );
+ nlr.setTableau( &tableau );
+
+ // Create the layers
+ nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+ nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
+ nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 );
+ nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );
+
+ // Mark layer dependencies
+ for ( unsigned i = 1; i <= 3; ++i )
+ nlr.addLayerDependency( i - 1, i );
+
+ // Weights
+ nlr.setWeight( 0, 0, 1, 0, 2 );
+ nlr.setWeight( 0, 0, 1, 1, 1 );
+ nlr.setWeight( 0, 1, 1, 0, 3 );
+ nlr.setWeight( 0, 1, 1, 1, 1 );
+ nlr.setWeight( 2, 0, 3, 0, 1 );
+ nlr.setWeight( 2, 1, 3, 0, -1 );
+
+ // Mark the Abs sources
+ nlr.addActivationSource( 1, 0, 2, 0 );
+ nlr.addActivationSource( 1, 1, 2, 1 );
+
+ // Variable indexing
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
+
+ // Very loose bounds for neurons except inputs
+ double large = 1000000;
+
+ tableau.setLowerBound( 2, -large );
+ tableau.setUpperBound( 2, large );
+ tableau.setLowerBound( 3, -large );
+ tableau.setUpperBound( 3, large );
+ tableau.setLowerBound( 4, -large );
+ tableau.setUpperBound( 4, large );
+ tableau.setLowerBound( 5, -large );
+ tableau.setUpperBound( 5, large );
+ tableau.setLowerBound( 6, -large );
+ tableau.setUpperBound( 6, large );
+
+ tableau.setLowerBound( 0, 4 );
+ tableau.setUpperBound( 0, 6 );
+ tableau.setLowerBound( 1, 1 );
+ tableau.setUpperBound( 1, 5 );
+
+ // Strong negative bias for x2, which is node (1,0). Should make the node unfixed.
+ nlr.setBias( 1, 0, -15 );
+
+ // However, the weighted sum variable has been eliminated
+ nlr.eliminateVariable( 2, -3 );
+
+ unsigned paramCount = nlr.getNumberOfParameters();
+ Vector<double> coeffs( paramCount, 0.5 );
+
+ // Invoke Parameterised DeepPoly
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) );
+
+ /*
+ Input ranges:
+
+ x0: [4, 6]
+ x1: [1, 5]
+
+ Layers 1, 2:
+
+ x2 = -3
+ x2 is eliminated, everything set to -3
+
+ x3 = x0 + x1
+ x3.lb = x0 + x1 : [5, 11]
+ x3.ub = x0 + x1 : [5, 11]
+
+ First absolute value is negative, bounds get flipped
+ Second absolute value is positive, bounds survive the activation
+
+ -x2 <= x4 <= -x2
+ x4: all set to 3
+
+ x3 <= x5 <= x3
+ x5.lb = x0 + x1 : [5, 11]
+ x5.ub = x0 + x1 : [5, 11]
+
+ Layer 3:
+
+ x6 = x4 - x5
+ => -x2 - x3 <= x6 <= -x2 - x3
+ => -x3 + 3 <= x6 <= -x3 + 3
+ x6.lb = - x0 - x1 + 3 : [-8, -2]
+ x6.ub = - x0 - x1 + 3 : [-8, -2]
+ */
+
+ List<Tightening> expectedBounds( {
+ // x2 does not appear, because it has been eliminated
+
+ Tightening( 3, 5, Tightening::LB ),
+ Tightening( 3, 11, Tightening::UB ),
+
+ Tightening( 4, 3, Tightening::LB ),
+ Tightening( 4, 3, Tightening::UB ),
+ Tightening( 5, 5, Tightening::LB ),
+ Tightening( 5, 11, Tightening::UB ),
+
+ Tightening( 6, -8, Tightening::LB ),
+ Tightening( 6, -2, Tightening::UB ),
+ } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+ }
+
+ void test_parameterised_deeppoly_signs_positive_and_not_fixed()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ tableau.getBoundManager().initialize( 7 );
+ nlr.setTableau( &tableau );
+
+ // Create the layers
+ nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+ nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
+ nlr.addLayer( 2, NLR::Layer::SIGN, 2 );
+ nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );
+
+ // Mark layer dependencies
+ for ( unsigned i = 1; i <= 3; ++i )
+ nlr.addLayerDependency( i - 1, i );
+
+ // Weights
+ nlr.setWeight( 0, 0, 1, 0, 2 );
+ nlr.setWeight( 0, 0, 1, 1, 1 );
+ nlr.setWeight( 0, 1, 1, 0, 3 );
+ nlr.setWeight( 0, 1, 1, 1, 1 );
+ nlr.setWeight( 2, 0, 3, 0, 1 );
+ nlr.setWeight( 2, 1, 3, 0, -1 );
+
+ // Mark the Sign sources
+ nlr.addActivationSource( 1, 0, 2, 0 );
+ nlr.addActivationSource( 1, 1, 2, 1 );
+
+ // Variable indexing
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
+
+ // Very loose bounds for neurons except inputs
+ double large = 1000000;
+
+ tableau.setLowerBound( 2, -large );
+ tableau.setUpperBound( 2, large );
+ tableau.setLowerBound( 3, -large );
+ tableau.setUpperBound( 3, large );
+ tableau.setLowerBound( 4, -large );
+ tableau.setUpperBound( 4, large );
+ tableau.setLowerBound( 5, -large );
+ tableau.setUpperBound( 5, large );
+ tableau.setLowerBound( 6, -large );
+ tableau.setUpperBound( 6, large );
+
+ tableau.setLowerBound( 0, 4 );
+ tableau.setUpperBound( 0, 6 );
+ tableau.setLowerBound( 1, 1 );
+ tableau.setUpperBound( 1, 5 );
+
+ // Strong negative bias for x2, which is node (1,0)
+ nlr.setBias( 1, 0, -15 );
+
+ unsigned paramCount = nlr.getNumberOfParameters();
+ Vector<double> coeffs( paramCount, 0.5 );
+
+ // Invoke Parameterised DeepPoly
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) );
+
+ /*
+ Input ranges:
+
+ x0: [4, 6]
+ x1: [1, 5]
+
+ Layers 1, 2:
+
+ x2 = 2x0 + 3x1 - 15
+ x2.lb = 2x0 + 3x1 - 15 : [-4, 12]
+ x2.ub = 2x0 + 3x1 - 15 : [-4, 12]
+
+ x3 = x0 + x1
+ x3.lb = x0 + x1 : [5, 11]
+ x3.ub = x0 + x1 : [5, 11]
+
+ First sign is undecided, bounds are concretized.
+ Second sign is positive, bounds become constant 1
+ Using custom coefficients with alpha = { 0.5, 0.5 }.
+ Coefficient (first Sign, lower): 2/12 * 0.5 = 1/12.
+ Coefficient (first Sign, upper): -2/-4 * 0.5 = 1/4.
+
+ 1/12 x2 - 1 <= x4 <= 1/4 x2 + 1
+ x4.lb = 1/12 ( 2x0 + 3x1 - 15 ) - 1 = 2/12 x0 + 3/12 x1 - 27/12
+ x4.ub = 1/4 ( 2x0 + 3x1 - 15 ) + 1 = 0.5 x0 + 0.75x1 - 2.75
+ x4 range: [-1, 1]
+
+ 1 <= x5 <= 1
+ x5.lb = 1
+ x5.ub = 1
+ x5 range: [1, 1]
+
+ Layer 3:
+
+ x6 = x4 - x5 : [-2, 0]
+ => 1/12 x2 - 2 <= x6 <= 1/4 x2 : [-7/3, 3]
+ x6.lb = 1 ( 2/12 x0 + 3/12 x1 - 27/12 ) - 1 ( 1 ) = 2/12 x0 + 3/12 x1 - 39/12 :
+ [-28/12 = -7/3, -1]
+ x6.ub = 1 ( 0.5 x0 + 0.75x1 - 2.75 ) - 1 ( 1 ) = 0.5 x0 + 0.75x1 - 3.75 : [-1, 3]
+
+ x6 range: [-2, 0]
+ */
+
+ List<Tightening> expectedBounds( {
+ Tightening( 2, -4, Tightening::LB ),
+ Tightening( 2, 12, Tightening::UB ),
+ Tightening( 3, 5, Tightening::LB ),
+ Tightening( 3, 11, Tightening::UB ),
+
+ Tightening( 4, -1, Tightening::LB ),
+ Tightening( 4, 1, Tightening::UB ),
+ Tightening( 5, 1, Tightening::LB ),
+ Tightening( 5, 1, Tightening::UB ),
+
+ Tightening( 6, -2, Tightening::LB ),
+ Tightening( 6, 0, Tightening::UB ),
+ } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+ }
+
+ void test_parameterised_deeppoly_signs_active_and_externally_fixed()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ tableau.getBoundManager().initialize( 7 );
+ nlr.setTableau( &tableau );
+
+ // Create the layers
+ nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+ nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
+ nlr.addLayer( 2, NLR::Layer::SIGN, 2 );
+ nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );
+
+ // Mark layer dependencies
+ for ( unsigned i = 1; i <= 3; ++i )
+ nlr.addLayerDependency( i - 1, i );
+
+ // Weights
+ nlr.setWeight( 0, 0, 1, 0, 2 );
+ nlr.setWeight( 0, 0, 1, 1, 1 );
+ nlr.setWeight( 0, 1, 1, 0, 3 );
+ nlr.setWeight( 0, 1, 1, 1, 1 );
+ nlr.setWeight( 2, 0, 3, 0, 1 );
+ nlr.setWeight( 2, 1, 3, 0, -1 );
+
+ // Mark the Sign sources
+ nlr.addActivationSource( 1, 0, 2, 0 );
+ nlr.addActivationSource( 1, 1, 2, 1 );
+
+ // Variable indexing
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
+
+ // Very loose bounds for neurons except inputs
+ double large = 1000000;
+
+ tableau.setLowerBound( 2, -large );
+ tableau.setUpperBound( 2, large );
+ tableau.setLowerBound( 3, -large );
+ tableau.setUpperBound( 3, large );
+ tableau.setLowerBound( 4, -large );
+ tableau.setUpperBound( 4, large );
+ tableau.setLowerBound( 5, -large );
+ tableau.setUpperBound( 5, large );
+ tableau.setLowerBound( 6, -large );
+ tableau.setUpperBound( 6, large );
+
+ tableau.setLowerBound( 0, 4 );
+ tableau.setUpperBound( 0, 6 );
+ tableau.setLowerBound( 1, 1 );
+ tableau.setUpperBound( 1, 5 );
+
+ // Strong negative bias for x2, which is node (1,0). Should make the node unfixed.
+ nlr.setBias( 1, 0, -15 );
+
+ // However, the weighted sum variable has been eliminated
+ nlr.eliminateVariable( 2, -3 );
+
+ unsigned paramCount = nlr.getNumberOfParameters();
+ Vector<double> coeffs( paramCount, 0.5 );
+
+ // Invoke Parameterised DeepPoly
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) );
+
+ /*
+ Input ranges:
+
+ x0: [4, 6]
+ x1: [1, 5]
+
+ Layers 1, 2:
+
+ x2 = -3
+ x2 is eliminated, everything set to -3
+
+ x3 = x0 + x1
+ x3.lb = x0 + x1 : [5, 11]
+ x3.ub = x0 + x1 : [5, 11]
+
+ First sign is negative, bounds become constant -1
+ Second sign is positive, bounds become constant 1
+
+ -1 <= x4 <= -1
+ x4: all set to -1
+
+ 1 <= x5 <= 1
+ x5: all set to 1
+
+ Layer 3:
+
+ x6 = x4 - x5
+ x6.lb = 1 ( -1 ) - 1 ( 1 ) = -2
+ x6.ub = 1 ( -1 ) - 1 ( 1 ) = -2
+ */
+
+ List<Tightening> expectedBounds( {
+ // x2 does not appear, because it has been eliminated
+
+ Tightening( 3, 5, Tightening::LB ),
+ Tightening( 3, 11, Tightening::UB ),
+
+ Tightening( 4, -1, Tightening::LB ),
+ Tightening( 4, -1, Tightening::UB ),
+ Tightening( 5, 1, Tightening::LB ),
+ Tightening( 5, 1, Tightening::UB ),
+
+ Tightening( 6, -2, Tightening::LB ),
+ Tightening( 6, -2, Tightening::UB ),
+ } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+ }
+
+ void test_parameterised_deeppoly_leaky_relu()
+ {
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkDeepPolyLeakyReLU( nlr, tableau ); // alpha = 0.2
+
+ tableau.setLowerBound( 0, -1 );
+ tableau.setUpperBound( 0, 1 );
+ tableau.setLowerBound( 1, -1 );
+ tableau.setUpperBound( 1, 1 );
+
+ unsigned paramCount = nlr.getNumberOfParameters();
+ Vector<double> coeffs( paramCount, 0.5 );
+
+ // Invoke Parameterised DeepPoly
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) );
+
+ /*
+ Input ranges:
+
+ x0: [-1, 1]
+ x1: [-1, 1]
+
+ Layer 1:
+ x2 = x0 + x1
+ x2.lb = x0 + x1 : [-2, 2]
+ x2.ub = x0 + x1 : [-2, 2]
+
+ x3 = x0 - x1
+ x3.lb = x0 - x1 : [-2, 2]
+ x3.ub = x0 - x1 : [-2, 2]
+
+ Both LeakyReLUs are undecided, bounds are concretized. Using custom lower coefficient with
+ alpha = { 0.5 }.
+ Lower Coefficient: ( 1 - 0.2 ) * 0.5 + 0.2 = 0.6
+ Lower Bias: 0
+ Upper Coefficient: ( 2 - 0.2*-2 )/( 2--2 ) = 2.4/4 = 0.6
+ Upper Bias: ( 0.2 - 1 ) * 2 * -2 /( 2--2 ) = 0.8
+
+ 0.6 x2 <= x4 <= 0.6 x2 + 0.8
+ x4.lb = 0.6 ( x0 + x1 ) = 0.6 x0 + 0.6x1
+ x4.ub = 0.6 ( x0 + x1 ) + 0.8 = 0.6 x0 + 0.6 x1 + 0.8
+ x4 range: [-1.2, 2]
+
+ 0.6 x3 <= x5 <= 0.6 x3 + 0.8
+ x5.lb = 0.6 ( x0 - x1 ) = 0.6 x0 - 0.6 x1
+ x5.ub = 0.6 ( x0 - x1 ) + 0.8 = 0.6 x0 - 0.6 x1 + 0.8
+ x5 range: [-1.2, 2]
+
+ Layer 2:
+
+ x6 = x4 + x5
+ x6.lb = 1 ( 0.6x0 + 0.6x1 ) + 1 ( 0.6x0 - 0.6x1 ) = 1.2 x0 : [-1.2, 1.2]
+ x6.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) + 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 1.2 x0 + 1.6 : [0.4, 2.8]
+ x6 range: [-1.2, 2.8]
+
+ x7 = x4 - x5
+ x7.lb = 1 ( 0.6x0 + 0.6x1 ) - 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 1.2 x1 - 0.8 : [-2, 0.4]
+ x7.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) - 1 ( 0.6x0 - 0.6x1 ) = 1.2 x1 + 0.8 : [-0.4, 2]
+ x7 range: [-2, 2]
+
+ Both LeakyReLUs are undecided, bounds are concretized. Using custom lower coefficient with
+ alpha = { 0.5 }.
+ Lower Coefficient (first LeakyReLU): ( 1 - 0.2 ) * 0.5 + 0.2 = 0.6
+ Lower Bias (first LeakyReLU): 0
+ Upper Coefficient (first LeakyReLU): ( 2.8 - 0.2*-1.2 )/( 2.8--1.2 ) = 3.04/4 = 0.76
+ Upper Bias (first LeakyReLU): ( 0.2 - 1 ) * 2.8 * -1.2 / ( 2.8--1.2 ) = 0.672
+
+ Lower Coefficient (second LeakyReLU): ( 1 - 0.2 ) * 0.5 + 0.2 = 0.6
+ Lower Bias (second LeakyReLU): 0
+ Upper Coefficient (second LeakyReLU): ( 2 - 0.2*-2 )/( 2--2 ) = 2.4/4 = 0.6
+ Upper Bias (second LeakyReLU): ( 0.2 - 1 ) * 2 * -2 / ( 2--2 ) = 0.8
+
+ 0.6 x6 <= x8 <= 0.76 x6 + 0.672
+ x8.lb = 0.6 ( 1.2x0 ) = 0.72 x0
+ x8.ub = 0.76 ( 1.2x0 + 1.6 ) + 0.672 = 0.912 x0 + 1.888
+ x8 range: [-0.72, 2.8]
+
+ 0.6 x7 <= x9 <= 0.6 x7 + 0.8
+ x9.lb = 0.6 ( 1.2x1 - 0.8 ) = 0.72 x1 - 0.48
+ x9.ub = 0.6 ( 1.2x1 + 0.8 ) + 0.8 = 0.72 x1 + 1.28
+ x9 range: [-1.2, 2]
+
+ Layer 3:
+
+ x10 = x8 + x9 + 1
+ x10.lb = 0.6 x6 + 0.6 x7 + 1 >= 0.6 ( x4 + x5 ) + 0.6 ( x4 - x5 ) + 1 =
+ 1.2 x4 + 1 >= 1.2 ( 0.6 x2 ) + 1 = 0.72 x2 + 1
+ = 0.72 x0 + 0.72 x1 + 1 : [-0.44, 2.44]
+ x10.ub = ( 0.76 x6 + 0.672 ) + ( 0.6 x7 + 0.8 ) + 1 = 0.76 x6 + 0.6 x7 + 2.472
+ <= 0.76 ( x4 + x5 ) + 0.6 ( x4 - x5 ) + 2.472 = 1.36 x4 + 0.16 x5 + 2.472
+ <= 1.36 ( 0.6 x2 + 0.8 ) + 0.16 ( 0.6 x3 + 0.8 ) + 2.472
+ = 0.816 x2 + 0.096 x3 + 3.688 = 0.912 x0 + 0.72 x1 + 3.688 : [2.056, 5.32]
+ x10 range: [-0.44, 5.32]
+
+ x11 = x9
+ x11.lb = 0.72 x1 - 0.48 : [-1.2, 0.24]
+ x11.ub = 0.72 x1 + 1.28 : [-0.56, 2]
+ x11 range: [-1.2, 2]
+
+ */
+
+ List<Tightening> expectedBounds(
+ { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+ Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+
+ Tightening( 4, -1.2, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
+ Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+
+ Tightening( 6, -1.2, Tightening::LB ), Tightening( 6, 2.8, Tightening::UB ),
+ Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
+
+ Tightening( 8, -0.72, Tightening::LB ), Tightening( 8, 2.8, Tightening::UB ),
+ Tightening( 9, -1.2, Tightening::LB ), Tightening( 9, 2, Tightening::UB ),
+
+ Tightening( 10, -0.44, Tightening::LB ), Tightening( 10, 5.32, Tightening::UB ),
+ Tightening( 11, -1.2, Tightening::LB ), Tightening( 11, 2, Tightening::UB )
+
+ } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+ }
+
+ void test_parameterised_deeppoly_sigmoids_and_round()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkDeepPolySigmoidsAndRound( nlr, tableau );
+
+ tableau.setLowerBound( 0, -1 );
+ tableau.setUpperBound( 0, 1 );
+ tableau.setLowerBound( 1, -1 );
+ tableau.setUpperBound( 1, 1 );
+
+ unsigned paramCount = nlr.getNumberOfParameters();
+ Vector<double> coeffs( paramCount, 0.5 );
+
+ // Invoke Parameterised DeepPoly
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+
+ // Layer 1
+ TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 0 ), -2, 0.00001 ) );
+ TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 0 ), 2, 0.00001 ) );
+ TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 1 ), -2, 0.00001 ) );
+ TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 1 ), 2, 0.00001 ) );
+
+ // Layer 2
+ TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 0 ), 0.1192, 0.0001 ) );
+ TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 0 ), 0.8807, 0.0001 ) );
+ TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 1 ), 0.1192, 0.0001 ) );
+ TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 1 ), 0.8807, 0.0001 ) );
+
+ // Layer 3
+ /*
+ Double-check with Python
+ ---
+ from math import exp as e
+ def g(x):
+     return 1 / (1 + e(-x))
+
+ def g_prime(x):
+     return g(x) * (1 - g(x))
+
+ def lam(l, u):
+     return (g(u) - g(l)) / (u - l)
+
+ def lam_prime(l, u):
+     return min(g_prime(l), g_prime(u))
+
+ l3 = l4 = -2
+ u3 = u4 = 2
+ l5 = l6 = g(-2)
+ u5 = u6 = g(2)
+ lambda7 = lam(l3, u3)
+ lambda7_prime = lam_prime(l3, u3)
+ lambda8 = lam(l4, u4)
+ lambda8_prime = lam_prime(l4, u4)
+ x7_l = lambda7_prime * (-2) + g(-2) + g(-2) - lambda7_prime * (-2 + -2)
+ x7_u = lambda7_prime * (2) + g(2) + g(2) - lambda7_prime * (2 + 2)
+ x8_l = lambda8_prime * (-2) + g(-2) - g(2) - lambda8_prime * (-2 - 2)
+ x8_u = lambda8_prime * (2) + g(2) - g(-2) - lambda8_prime * (2 - -2)
+ print(x7_l)
+ print(x7_u)
+ print(x8_l)
+ print(x8_u)
+
+ '''
+ Sigmoid linear relaxation ( Layer 2 ):
+ x4 >= lambda7_prime * x2 + ( g(l3) - lambda7_prime * l3 )
+ x4 <= lambda7_prime * x2 + ( g(u3) - lambda7_prime * u3 )
+ x5 >= lambda8_prime * x3 + ( g(l4) - lambda8_prime * l4 )
+ x5 <= lambda8_prime * x3 + ( g(u4) - lambda8_prime * u4 )
+ '''
+ print('------------------')
+ print(lambda7_prime)
+ print(lambda8_prime)
+ print(g(l3) - lambda7_prime * l3)
+ print(g(u3) - lambda7_prime * u3)
+ print(g(l4) - lambda8_prime * l4)
+ print(g(u4) - lambda8_prime * u4)
+
+ ---
+ [output]:
+ 0.4483930148512481
+ 1.5516069851487517
+ -0.5516069851487517
+ 0.5516069851487517
+ ------------------
+ 0.1049935854035065
+ 0.1049935854035065
+ 0.3291900928291306
+ 0.6708099071708693
+ 0.3291900928291306
+ 0.6708099071708693
+ */
+ TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 0 ), 0.4483, 0.0001 ) );
+ TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 0 ), 1.5516, 0.0001 ) );
+ TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 1 ), -0.5516, 0.0001 ) );
+ TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 1 ), 0.5516, 0.0001 ) );
+
+ // Layer 4
+ TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 0 ), 0 );
+ TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 0 ), 2 );
+ TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 1 ), -1 );
+ TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 1 ), 1 );
+ }
+
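+ /*
+   The parameterised ReLU and Max tests below repeatedly use the relaxation
+   lb: alpha * x (with alpha = 0.5 from the coeffs vector) and
+   ub: u / ( u - l ) * ( x - l ) for an undecided ReLU with input range
+   [ l, u ]. A standalone Python sketch of that arithmetic (an illustration
+   assuming these standard DeepPoly formulas, not part of the test suite):
+
+   def relu_relaxation( l, u, alpha=0.5 ):
+       lam = u / ( u - l )    # upper-bound slope
+       mu = -lam * l          # upper-bound bias
+       return alpha, lam, mu  # lower slope, upper slope, upper bias
+
+   relu_relaxation( -2, 3 )  # ( 0.5, 0.6, 1.2 ), as in the max test below
+ */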
+ void test_parameterised_deeppoly_max_not_fixed()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkDeepPolyMax( nlr, tableau );
+
+ tableau.setLowerBound( 0, -1 );
+ tableau.setUpperBound( 0, 1 );
+ tableau.setLowerBound( 1, -1 );
+ tableau.setUpperBound( 1, 2 );
+
+ unsigned paramCount = nlr.getNumberOfParameters();
+ Vector<double> coeffs( paramCount, 0.5 );
+
+ // Invoke Parameterised DeepPoly
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) );
+
+ /*
+ Input ranges:
+
+ x0: [-1, 1]
+ x1: [-1, 2]
+
+ Layers 1, 2, 3:
+
+ x2 = x0 + x1
+ x2.lb = x0 + x1 : [-2, 3]
+ x2.ub = x0 + x1 : [-2, 3]
+
+ x3 = x0 - x1
+ x3.lb = x0 - x1 : [-3, 2]
+ x3.ub = x0 - x1 : [-3, 2]
+
+ Both ReLUs are undecided, bounds are concretized. Using custom ReLU lower
+ coefficient of 0.5.
+ Upper coefficient (first ReLU): 3/( 3--2 ) = 3/5 = 0.6.
+ Upper coefficient (second ReLU): 2/( 2--3 ) = 2/5 = 0.4
+
+ 0.5 x2 <= x4 <= 0.6 x2 + 1.2
+ x4.lb = 0.5 ( x0 + x1 ) = 0.5 x0 + 0.5 x1
+ x4.ub = 0.6 ( x0 + x1 ) + 1.2 = 0.6x0 + 0.6x1 + 1.2
+ x4 range: [-1, 3]
+
+ 0.5 x3 <= x5 <= 0.4 x3 + 1.2
+ x5.lb = 0.5 ( x0 - x1 ) = 0.5 x0 - 0.5 x1
+ x5.ub = 0.4 ( x0 - x1 ) + 1.2 = 0.4x0 - 0.4x1 + 1.2
+ x5 range: [-1.5, 2]
+
+ Max is not fixed because x5.lb <= x4.ub and x4.lb <= x5.ub
+ Max inherits lower bound from x4, and its upper bound is constant 3.
+
+ x4 <= x6 <= 3
+ x6.lb = 0.5 x0 + 0.5 x1 : [-1, 1.5]
+ x6.ub = 3 : [3, 3]
+ x6 range: [-1, 3]
+
+ Layer 4:
+
+ x7 = 2x6
+ => 2x4 <= x7 <= 6
+ x7.lb = 2 ( 0.5 x0 + 0.5 x1 ) = x0 + x1 : [-2, 3]
+ x7.ub = 2 ( 3 ) = 6 : [6, 6]
+ x7 range: [-2, 6]
+ */
+
+ List<Tightening> expectedBounds( {
+ Tightening( 2, -2, Tightening::LB ),
+ Tightening( 2, 3, Tightening::UB ),
+ Tightening( 3, -3, Tightening::LB ),
+ Tightening( 3, 2, Tightening::UB ),
+ Tightening( 4, -1, Tightening::LB ),
+ Tightening( 4, 3, Tightening::UB ),
+ Tightening( 5, -1.5, Tightening::LB ),
+ Tightening( 5, 2, Tightening::UB ),
+ Tightening( 6, -1, Tightening::LB ),
+ Tightening( 6, 3, Tightening::UB ),
+ Tightening( 7, -2, Tightening::LB ),
+ Tightening( 7, 6, Tightening::UB ),
+
+ } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+ }
+
+ void test_parameterised_deeppoly_max_fixed()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkDeepPolyMax( nlr, tableau );
+
+ tableau.setLowerBound( 0, 1 );
+ tableau.setUpperBound( 0, 2 );
+ tableau.setLowerBound( 1, -3 );
+ tableau.setUpperBound( 1, -2 );
+
+ unsigned paramCount = nlr.getNumberOfParameters();
+ Vector<double> coeffs( paramCount, 0.5 );
+
+ // Invoke Parameterised DeepPoly
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) );
+
+ /*
+ Input ranges:
+
+ x0: [1, 2]
+ x1: [-3, -2]
+
+ Layer 1:
+
+ x2 = x0 + x1
+ x2.lb = x0 + x1 : [-2, 0]
+ x2.ub = x0 + x1 : [-2, 0]
+
+ x3 = x0 - x1
+ x3.lb = x0 - x1 : [3, 5]
+ x3.ub = x0 - x1 : [3, 5]
+
+ First ReLU is negative, bounds become constant 0
+ Second ReLU is positive, bounds survive the activation
+
+ 0 <= x4 <= 0
+ x4: all set to 0
+
+ x3 <= x5 <= x3
+ x5.lb = x0 - x1 : [3, 5]
+ x5.ub = x0 - x1 : [3, 5]
+
+ Max is fixed because x5.lb > x4.ub, so it inherits x5's bounds
+
+ x5 <= x6 <= x5
+ => x3 <= x6 <= x3
+ x6.lb = x0 - x1 : [3, 5]
+ x6.ub = x0 - x1 : [3, 5]
+
+ Layer 4:
+
+ x7 = 2x6
+ => x7 = 2x5 = 2x3 = 2x0 - 2x1
+ x7.lb = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10]
+ x7.ub = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10]
+ */
+
+ List<Tightening> expectedBounds( {
+ Tightening( 2, -2, Tightening::LB ),
+ Tightening( 2, 0, Tightening::UB ),
+ Tightening( 3, 3, Tightening::LB ),
+ Tightening( 3, 5, Tightening::UB ),
+ Tightening( 4, 0, Tightening::LB ),
+ Tightening( 4, 0, Tightening::UB ),
+ Tightening( 5, 3, Tightening::LB ),
+ Tightening( 5, 5, Tightening::UB ),
+ Tightening( 6, 3, Tightening::LB ),
+ Tightening( 6, 5, Tightening::UB ),
+ Tightening( 7, 6, Tightening::LB ),
+ Tightening( 7, 10, Tightening::UB ),
+
+ } );
+
+ List<Tightening> bounds;
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+ }
+
+ void test_parameterised_deeppoly_softmax1()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkDeepPolySoftmax( nlr, tableau );
+
+ tableau.setLowerBound( 0, -1 );
+ tableau.setUpperBound( 0, 1 );
+ tableau.setLowerBound( 1, -1 );
+ tableau.setUpperBound( 1, 1 );
+ tableau.setLowerBound( 2, -1 );
+ tableau.setUpperBound( 2, 1 );
+
+ unsigned paramCount = nlr.getNumberOfParameters();
+ Vector<double> coeffs( paramCount, 0.5 );
+
+ // Invoke Parameterised DeepPoly
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) );
+ }
+
+ void test_parameterised_deeppoly_softmax2()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
+ {
+ Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" );
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkDeepPolySoftmax( nlr, tableau );
+
+ tableau.setLowerBound( 0, 1 );
+ tableau.setUpperBound( 0, 1.000001 );
+ tableau.setLowerBound( 1, 1 );
+ tableau.setUpperBound( 1, 1.000001 );
+ tableau.setLowerBound( 2, 1 );
+ tableau.setUpperBound( 2, 1.000001 );
+
+ unsigned paramCount = nlr.getNumberOfParameters();
+ Vector<double> coeffs( paramCount, 0.5 );
+
+ // Invoke Parameterised DeepPoly
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) );
+
+ /*
+ Input ranges:
+
+ x0: [1, 1.0001]
+ x1: [1, 1.0001]
+ x2: [1, 1.0001]
+
+ Layer 1:
+
+ x3 = x0 - x1 + x2 + 1
+ x3.lb = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ]
+ x3.ub = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ]
+ x3 range: [ 1.999999, 2.000002 ]
+
+ x4 = -x0 + x1 + x2 + 2
+ x4.lb = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ]
+ x4.ub = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ]
+ x4 range: [ 2.999999, 3.000002 ]
+
+ x5 = -x0 - x1 - x2 + 3
+ x5.lb = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ]
+ x5.ub = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ]
+ x5 range: [ -0.000003, 0 ]
+ */
+
+ unsigned size = nlr.getLayer( 2 )->getSize();
+ Vector<double> sourceLbs = { 1.999899, 2.999899, -0.000003 };
+ Vector<double> sourceUbs = { 2.000102, 3.000102, 0.0001 };
+ Vector<double> sourceMids = { 2.0000005, 3.0000005, -0.0000015 };
+ Vector<double> targetLbs( size, 0 );
+ Vector<double> targetUbs( size, 0 );
+ Vector<double> symbolicLb( size * size, 0 );
+ Vector<double> symbolicUb( size * size, 0 );
+ Vector<double> symbolicLowerBias( size, 0 );
+ Vector<double> symbolicUpperBias( size, 0 );
+ for ( unsigned i = 0; i < size; ++i )
+ {
+ targetLbs[i] =
+            for ( unsigned i = 0; i < size; ++i )
+            {
+                targetLbs[i] = NLR::Layer::linearLowerBound( sourceLbs, sourceUbs, i );
+                targetUbs[i] = NLR::Layer::linearUpperBound( sourceLbs, sourceUbs, i );
+            }
+            for ( unsigned i = 0; i < size; ++i )
+            {
+                symbolicLowerBias[i] =
+                    NLR::Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, i ); // Using lse2
+                symbolicUpperBias[i] =
+                    NLR::Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, i );
+                for ( unsigned j = 0; j < size; ++j )
+                {
+                    symbolicLb[size * j + i] =
+                        NLR::Layer::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, i, j );
+                    symbolicUb[size * j + i] =
+                        NLR::Layer::dLSEUpperbound( sourceMids, targetLbs, targetUbs, i, j );
+                    symbolicLowerBias[i] -= symbolicLb[size * j + i] * sourceMids[j];
+                    symbolicUpperBias[i] -= symbolicUb[size * j + i] * sourceMids[j];
+                }
+            }
+            TS_ASSERT( compareVectors( targetLbs, Vector<double>( { 0.2595, 0.7054, 0.0351 } ) ) );
+            TS_ASSERT( compareVectors( targetUbs, Vector<double>( { 0.2595, 0.7054, 0.0351 } ) ) );
+            TS_ASSERT( compareVectors( symbolicLb,
+                                       Vector<double>( { 0.1922,
+                                                         -0.1830,
+                                                         -0.0091,
+                                                         -0.1830,
+                                                         0.2078,
+                                                         -0.0248,
+                                                         -0.0091,
+                                                         -0.0248,
+                                                         0.0339 } ) ) );
+            TS_ASSERT( compareVectors( symbolicUb,
+                                       Vector<double>( { 0.1922,
+                                                         -0.1830,
+                                                         -0.0091,
+                                                         -0.1830,
+                                                         0.2078,
+                                                         -0.0248,
+                                                         -0.0091,
+                                                         -0.0248,
+                                                         0.0339 } ) ) );
+            TS_ASSERT(
+                compareVectors( symbolicLowerBias, Vector<double>( { 0.4243, 0.4481, 0.1277 } ) ) );
+            TS_ASSERT(
+                compareVectors( symbolicUpperBias, Vector<double>( { 0.4243, 0.4480, 0.1277 } ) ) );
+
+            /*
+              Layer 2:
+
+              0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243
+              x6.lb = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232
+              x6.ub = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232
+              x6 range: [ 0.2595, 0.2595 ]
+
+              -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4480 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481
+              x7.lb = -0.3660 x0 + 0.4156 x1 + 0.0496 x2 + 0.6062
+              x7.ub = -0.3660 x0 + 0.4156 x1 + 0.0496 x2 + 0.6063
+              x7 range: [ 0.7054, 0.7054 ]
+
+              -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277
+              x8.lb = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707
+              x8.ub = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707
+              x8 range: [ 0.0351, 0.0351 ]
+
+              Layer 3:
+
+              x9 = x6 + x7 + x8
+              => x9 = ( 0.1922 - 0.1830 - 0.0091 ) x3 + ( -0.1830 + 0.2078 - 0.0248 ) x4 +
+                 ( -0.0091 - 0.0248 + 0.0339 ) x5 + ( 0.4243 + 0.4481 + 0.1277 )
+              => x9 = 0.0001 x3 + 0 x4 + 0 x5 + 1.0001
+              => ( Up to rounding ) 1 <= x9 <= 1.
+              x9.lb = 1
+              x9.ub = 1
+              x9 range: [ 1, 1 ]
+
+              x10 = - x6 - x7 - x8
+              => x10 = - ( 0.1922 - 0.1830 - 0.0091 ) x3 - ( -0.1830 + 0.2078 - 0.0248 ) x4 -
+                 ( -0.0091 - 0.0248 + 0.0339 ) x5 - ( 0.4243 + 0.4481 + 0.1277 )
+              => x10 = - 0.0001 x3 - 0.0000 x4 - 0.0000 x5 - 1.0001
+              => ( Up to rounding ) -1 <= x10 <= -1.
+              x10.lb = -1
+              x10.ub = -1
+              x10 range: [ -1, -1 ]
+            */
+
+            List<Tightening> expectedBounds( { Tightening( 3, 2, Tightening::LB ),
+                                               Tightening( 3, 2, Tightening::UB ),
+                                               Tightening( 4, 3, Tightening::LB ),
+                                               Tightening( 4, 3, Tightening::UB ),
+                                               Tightening( 5, 0, Tightening::LB ),
+                                               Tightening( 5, 0, Tightening::UB ),
+                                               Tightening( 6, 0.2595, Tightening::LB ),
+                                               Tightening( 6, 0.2595, Tightening::UB ),
+                                               Tightening( 7, 0.7054, Tightening::LB ),
+                                               Tightening( 7, 0.7054, Tightening::UB ),
+                                               Tightening( 8, 0.0351, Tightening::LB ),
+                                               Tightening( 8, 0.0351, Tightening::UB ),
+                                               Tightening( 9, 1, Tightening::LB ),
+                                               Tightening( 9, 1, Tightening::UB ),
+                                               Tightening( 10, -1, Tightening::LB ),
+                                               Tightening( 10, -1, Tightening::UB ) } );
+
+            List<Tightening> bounds;
+            TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+            TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+        }
+        {
+            Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "er" );
+            NLR::NetworkLevelReasoner nlr;
+            MockTableau tableau;
+            nlr.setTableau( &tableau );
+            populateNetworkDeepPolySoftmax( nlr, tableau );
+
+            tableau.setLowerBound( 0, 1 );
+            tableau.setUpperBound( 0, 1.000001 );
+            tableau.setLowerBound( 1, 1 );
+            tableau.setUpperBound( 1, 1.000001 );
+            tableau.setLowerBound( 2, 1 );
+            tableau.setUpperBound( 2, 1.000001 );
+
+            unsigned paramCount = nlr.getNumberOfParameters();
+            Vector<double> coeffs( paramCount, 0.5 );
+
+            // Invoke Parameterised DeepPoly
+            TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+            TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) );
+
+            /*
+              Input ranges:
+
+              x0: [1, 1.000001]
+              x1: [1, 1.000001]
+              x2: [1, 1.000001]
+
+              Layer 1:
+
+              x3 = x0 - x1 + x2 + 1
+              x3.lb = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ]
+              x3.ub = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ]
+              x3 range: [ 1.999999, 2.000002 ]
+
+              x4 = -x0 + x1 + x2 + 2
+              x4.lb = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ]
+              x4.ub = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ]
+              x4 range: [ 2.999999, 3.000002 ]
+
+              x5 = -x0 - x1 - x2 + 3
+              x5.lb = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ]
+              x5.ub = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ]
+              x5 range: [ -0.000003, 0 ]
+            */
+
+            unsigned size = nlr.getLayer( 2 )->getSize();
+            Vector<double> sourceLbs = { 1.999899, 2.999899, -0.000003 };
+            Vector<double> sourceUbs = { 2.000102, 3.000102, 0.0001 };
+            Vector<double> sourceMids = { 2.0000005, 3.0000005, -0.0000015 };
+            Vector<double> targetLbs( size, 0 );
+            Vector<double> targetUbs( size, 0 );
+            Vector<double> symbolicLb( size * size, 0 );
+            Vector<double> symbolicUb( size * size, 0 );
+            Vector<double> symbolicLowerBias( size, 0 );
+            Vector<double> symbolicUpperBias( size, 0 );
+            for ( unsigned i = 0; i < size; ++i )
+            {
+                targetLbs[i] = NLR::Layer::linearLowerBound( sourceLbs, sourceUbs, i );
+                targetUbs[i] = NLR::Layer::linearUpperBound( sourceLbs, sourceUbs, i );
+            }
+            for ( unsigned i = 0; i < size; ++i )
+            {
+                symbolicLowerBias[i] =
+                    NLR::Layer::ERLowerBound( sourceMids, sourceLbs, sourceUbs, i ); // Using er
+                symbolicUpperBias[i] =
+                    NLR::Layer::ERUpperBound( sourceMids, targetLbs, targetUbs, i );
+                for ( unsigned j = 0; j < size; ++j )
+                {
+                    symbolicLb[size * j + i] =
+                        NLR::Layer::dERLowerBound( sourceMids, sourceLbs, sourceUbs, i, j );
+                    symbolicUb[size * j + i] =
+                        NLR::Layer::dERUpperBound( sourceMids, targetLbs, targetUbs, i, j );
+                    symbolicLowerBias[i] -= symbolicLb[size * j + i] * sourceMids[j];
+                    symbolicUpperBias[i] -= symbolicUb[size * j + i] * sourceMids[j];
+                }
+            }
+            TS_ASSERT( compareVectors( targetLbs, Vector<double>( { 0.2595, 0.7054, 0.0351 } ) ) );
+            TS_ASSERT( compareVectors( targetUbs, Vector<double>( { 0.2595, 0.7054, 0.0351 } ) ) );
+            TS_ASSERT( compareVectors( symbolicLb,
+                                       Vector<double>( { 0.1922,
+                                                         -0.1830,
+                                                         -0.0091,
+                                                         -0.1830,
+                                                         0.2078,
+                                                         -0.0248,
+                                                         -0.0091,
+                                                         -0.0248,
+                                                         0.0339 } ) ) );
+            TS_ASSERT( compareVectors( symbolicUb,
+                                       Vector<double>( { 0.1922,
+                                                         -0.1830,
+                                                         -0.0091,
+                                                         -0.1830,
+                                                         0.2078,
+                                                         -0.0248,
+                                                         -0.0091,
+                                                         -0.0248,
+                                                         0.0339 } ) ) );
+            TS_ASSERT(
+                compareVectors( symbolicLowerBias, Vector<double>( { 0.4243, 0.4481, 0.1277 } ) ) );
+            TS_ASSERT(
+                compareVectors( symbolicUpperBias, Vector<double>( { 0.4243, 0.4480, 0.1277 } ) ) );
+
+            /*
+              Layer 2:
+
+              0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243
+              x6.lb = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232
+              x6.ub = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232
+              x6 range: [ 0.2595, 0.2595 ]
+
+              -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4480 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481
+              x7.lb = -0.3660 x0 + 0.4156 x1 + 0.0496 x2 + 0.6062
+              x7.ub = -0.3660 x0 + 0.4156 x1 + 0.0496 x2 + 0.6063
+              x7 range: [ 0.7054, 0.7054 ]
+
+              -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277
+              x8.lb = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707
+              x8.ub = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707
+              x8 range: [ 0.0351, 0.0351 ]
+
+              Layer 3:
+
+              x9 = x6 + x7 + x8
+              => x9 = ( 0.1922 - 0.1830 - 0.0091 ) x3 + ( -0.1830 + 0.2078 - 0.0248 ) x4 +
+                 ( -0.0091 - 0.0248 + 0.0339 ) x5 + ( 0.4243 + 0.4481 + 0.1277 )
+              => x9 = 0.0001 x3 + 0 x4 + 0 x5 + 1.0001
+              => ( Up to rounding ) 1 <= x9 <= 1.
+              x9.lb = 1
+              x9.ub = 1
+              x9 range: [ 1, 1 ]
+
+              x10 = - x6 - x7 - x8
+              => x10 = - ( 0.1922 - 0.1830 - 0.0091 ) x3 - ( -0.1830 + 0.2078 - 0.0248 ) x4 -
+                 ( -0.0091 - 0.0248 + 0.0339 ) x5 - ( 0.4243 + 0.4481 + 0.1277 )
+              => x10 = - 0.0001 x3 - 0.0000 x4 - 0.0000 x5 - 1.0001
+              => ( Up to rounding ) -1 <= x10 <= -1.
+              x10.lb = -1
+              x10.ub = -1
+              x10 range: [ -1, -1 ]
+            */
+
+            List<Tightening> expectedBounds( { Tightening( 3, 2, Tightening::LB ),
+                                               Tightening( 3, 2, Tightening::UB ),
+                                               Tightening( 4, 3, Tightening::LB ),
+                                               Tightening( 4, 3, Tightening::UB ),
+                                               Tightening( 5, 0, Tightening::LB ),
+                                               Tightening( 5, 0, Tightening::UB ),
+                                               Tightening( 6, 0.2595, Tightening::LB ),
+                                               Tightening( 6, 0.2595, Tightening::UB ),
+                                               Tightening( 7, 0.7054, Tightening::LB ),
+                                               Tightening( 7, 0.7054, Tightening::UB ),
+                                               Tightening( 8, 0.0351, Tightening::LB ),
+                                               Tightening( 8, 0.0351, Tightening::UB ),
+                                               Tightening( 9, 1, Tightening::LB ),
+                                               Tightening( 9, 1, Tightening::UB ),
+                                               Tightening( 10, -1, Tightening::LB ),
+                                               Tightening( 10, -1, Tightening::UB ) } );
+
+            List<Tightening> bounds;
+            TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+            TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+        }
+    }
+
+    void test_parameterised_deeppoly_softmax3()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkDeepPolySoftmax2( nlr, tableau );
+
+        tableau.setLowerBound( 0, 1 );
+        tableau.setUpperBound( 0, 1.00001 );
+        tableau.setLowerBound( 1, 1 );
+        tableau.setUpperBound( 1, 1.00001 );
+        tableau.setLowerBound( 2, 1 );
+        tableau.setUpperBound( 2, 1.00001 );
+
+        unsigned paramCount = nlr.getNumberOfParameters();
+        Vector<double> coeffs( paramCount, 0.5 );
+
+        // Invoke Parameterised DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) );
+
+        /*
+          Input ranges:
+
+          x0: [1, 1.00001]
+          x1: [1, 1.00001]
+          x2: [1, 1.00001]
+
+          Layer 1:
+
+          x3 = x0 - x1 + x2 + 1
+          x3.lb = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ]
+          x3.ub = x0 - x1 + x2 + 1 : [ 1.999999, 2.000002 ]
+          x3 range: [ 1.999999, 2.000002 ]
+
+          x4 = -x0 + x1 + x2 + 2
+          x4.lb = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ]
+          x4.ub = -x0 + x1 + x2 + 2 : [ 2.999999, 3.000002 ]
+          x4 range: [ 2.999999, 3.000002 ]
+
+          x5 = -x0 - x1 - x2 + 3
+          x5.lb = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ]
+          x5.ub = -x0 - x1 - x2 + 3 : [ -0.000003, 0 ]
+          x5 range: [ -0.000003, 0 ]
+
+          x6 = -x0 - x1 - x2 + 2
+          x6.lb = -x0 - x1 - x2 + 2 : [ -1.000003, -1 ]
+          x6.ub = -x0 - x1 - x2 + 2 : [ -1.000003, -1 ]
+          x6 range: [ -1.000003, -1 ]
+
+          x7 = -x0 - x1 - x2 + 1
+          x7.lb = -x0 - x1 - x2 + 1 : [ -2.000003, -2 ]
+          x7.ub = -x0 - x1 - x2 + 1 : [ -2.000003, -2 ]
+          x7 range: [ -2.000003, -2 ]
+        */
+
+        // First softmax: x8, x10, x12 = softmax( x3, x5, x7 ).
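+        // Sanity check for the constants asserted below: at the interval midpoints
+        // the sources are approximately ( 2, 0, -2 ), and
+        // softmax( 2, 0, -2 ) = ( e^2, e^0, e^-2 ) / ( e^2 + e^0 + e^-2 )
+        //                     ~ ( 7.3891, 1, 0.1353 ) / 8.5244
+        //                     ~ ( 0.8668, 0.1173, 0.0159 ),
+        // matching the target bounds checked in the asserts that follow.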
+        unsigned size = nlr.getLayer( 2 )->getActivationSources( 0 ).size();
+        Vector<double> sourceLbs = { 1.999899, -0.000003, -2.000103 };
+        Vector<double> sourceUbs = { 2.000102, 0.0001, -1.999 };
+        Vector<double> sourceMids = { 2.0000005, -0.0000015, -2.0000015 };
+        Vector<double> targetLbs( size, 0 );
+        Vector<double> targetUbs( size, 0 );
+        Vector<double> symbolicLb( size * size, 0 );
+        Vector<double> symbolicUb( size * size, 0 );
+        Vector<double> symbolicLowerBias( size, 0 );
+        Vector<double> symbolicUpperBias( size, 0 );
+        for ( unsigned i = 0; i < size; ++i )
+        {
+            targetLbs[i] = NLR::Layer::linearLowerBound( sourceLbs, sourceUbs, i );
+            targetUbs[i] = NLR::Layer::linearUpperBound( sourceLbs, sourceUbs, i );
+        }
+        for ( unsigned i = 0; i < size; ++i )
+        {
+            symbolicLowerBias[i] =
+                NLR::Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, i ); // Using lse2
+            symbolicUpperBias[i] = NLR::Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, i );
+            for ( unsigned j = 0; j < size; ++j )
+            {
+                symbolicLb[size * j + i] =
+                    NLR::Layer::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, i, j );
+                symbolicUb[size * j + i] =
+                    NLR::Layer::dLSEUpperbound( sourceMids, targetLbs, targetUbs, i, j );
+                symbolicLowerBias[i] -= symbolicLb[size * j + i] * sourceMids[j];
+                symbolicUpperBias[i] -= symbolicUb[size * j + i] * sourceMids[j];
+            }
+        }
+        TS_ASSERT( compareVectors( targetLbs, Vector<double>( { 0.8668, 0.1173, 0.0159 } ) ) );
+        TS_ASSERT( compareVectors( targetUbs, Vector<double>( { 0.8668, 0.1173, 0.0159 } ) ) );
+        TS_ASSERT( compareVectors( symbolicLb,
+                                   Vector<double>( { 0.1155,
+                                                     -0.1017,
+                                                     -0.0138,
+                                                     -0.1017,
+                                                     0.1035,
+                                                     -0.0019,
+                                                     -0.0138,
+                                                     -0.0019,
+                                                     0.0156 } ) ) );
+        TS_ASSERT( compareVectors( symbolicUb,
+                                   Vector<double>( { 0.1154,
+                                                     -0.1017,
+                                                     -0.0138,
+                                                     -0.1017,
+                                                     0.1036,
+                                                     -0.0019,
+                                                     -0.0138,
+                                                     -0.0019,
+                                                     0.0156 } ) ) );
+        TS_ASSERT(
+            compareVectors( symbolicLowerBias, Vector<double>( { 0.6084, 0.3170, 0.0747 } ) ) );
+        TS_ASSERT(
+            compareVectors( symbolicUpperBias, Vector<double>( { 0.6084, 0.3170, 0.0747 } ) ) );
+
+        // Second softmax: x9, x11 = softmax( x4, x6 ).
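+        // Sanity check: here the midpoints are approximately ( 3, -1 ), and
+        // softmax( 3, -1 ) = ( e^3, e^-1 ) / ( e^3 + e^-1 )
+        //                  ~ ( 20.0855, 0.3679 ) / 20.4534
+        //                  ~ ( 0.9820, 0.0180 ),
+        // matching the target bounds asserted below.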
+        size = nlr.getLayer( 2 )->getActivationSources( 1 ).size();
+        sourceLbs = Vector<double>( { 2.999899, -1.000103 } );
+        sourceUbs = Vector<double>( { 3.000102, -0.9999 } );
+        sourceMids = Vector<double>( { 3.0000005, -1.0000015 } );
+        targetLbs = Vector<double>( size, 0 );
+        targetUbs = Vector<double>( size, 0 );
+        symbolicLb = Vector<double>( size * size, 0 );
+        symbolicUb = Vector<double>( size * size, 0 );
+        symbolicLowerBias = Vector<double>( size, 0 );
+        symbolicUpperBias = Vector<double>( size, 0 );
+        for ( unsigned i = 0; i < size; ++i )
+        {
+            targetLbs[i] = NLR::Layer::linearLowerBound( sourceLbs, sourceUbs, i );
+            targetUbs[i] = NLR::Layer::linearUpperBound( sourceLbs, sourceUbs, i );
+        }
+        for ( unsigned i = 0; i < size; ++i )
+        {
+            symbolicLowerBias[i] =
+                NLR::Layer::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, i ); // Using lse2
+            symbolicUpperBias[i] = NLR::Layer::LSEUpperBound( sourceMids, targetLbs, targetUbs, i );
+            for ( unsigned j = 0; j < size; ++j )
+            {
+                symbolicLb[size * j + i] =
+                    NLR::Layer::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, i, j );
+                symbolicUb[size * j + i] =
+                    NLR::Layer::dLSEUpperbound( sourceMids, targetLbs, targetUbs, i, j );
+                symbolicLowerBias[i] -= symbolicLb[size * j + i] * sourceMids[j];
+                symbolicUpperBias[i] -= symbolicUb[size * j + i] * sourceMids[j];
+            }
+        }
+        TS_ASSERT( compareVectors( targetLbs, Vector<double>( { 0.9820, 0.0180 } ) ) );
+        TS_ASSERT( compareVectors( targetUbs, Vector<double>( { 0.9820, 0.0180 } ) ) );
+        TS_ASSERT(
+            compareVectors( symbolicLb, Vector<double>( { 0.0177, -0.0177, -0.0177, 0.0177 } ) ) );
+        TS_ASSERT(
+            compareVectors( symbolicUb, Vector<double>( { 0.0177, -0.0177, -0.0177, 0.0177 } ) ) );
+        TS_ASSERT( compareVectors( symbolicLowerBias, Vector<double>( { 0.9114, 0.0886 } ) ) );
+        TS_ASSERT( compareVectors( symbolicUpperBias, Vector<double>( { 0.9114, 0.0886 } ) ) );
+
+        /*
+          Layer 2:
+
+          First softmax: x8, x10, x12 = softmax( x3, x5, x7 ).
+
+          0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 <= x8 <= 0.1154 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084
+          x8.lb = 0.2310 x0 + 0.0001 x1 + 0.2310 x2 + 0.4051
+          x8.ub = 0.2310 x0 + 0.0000 x1 + 0.2310 x2 + 0.4050
+          x8 range: [ 0.8668, 0.8668 ]
+
+          -0.1017 x3 + 0.1035 x5 - 0.0019 x7 + 0.3170 <= x10 <= -0.1017 x3 + 0.1036 x5 - 0.0019 x7 + 0.3170
+          x10.lb = -0.2033 x0 + 0.0001 x1 - 0.2033 x2 + 0.5239
+          x10.ub = -0.2033 x0 + 0.0000 x1 - 0.2033 x2 + 0.5241
+          x10 range: [ 0.1173, 0.1173 ]
+
+          -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 <= x12 <= -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747
+          x12.lb = -0.0275 x0 + 0.0001 x1 - 0.0275 x2 + 0.0708
+          x12.ub = -0.0275 x0 + 0.0001 x1 - 0.0275 x2 + 0.0708
+          x12 range: [ 0.0159, 0.0159 ]
+
+          Second softmax: x9, x11 = softmax( x4, x6 ).
+
+          0.0177 x4 - 0.0177 x6 + 0.9114 <= x9 <= 0.0177 x4 - 0.0177 x6 + 0.9114
+          x9.lb = 0 x0 + 0.0354 x1 + 0.0354 x2 + 0.9114
+          x9.ub = 0 x0 + 0.0354 x1 + 0.0354 x2 + 0.9114
+          x9 range: [ 0.9820, 0.9820 ]
+
+          -0.0177 x4 + 0.0177 x6 + 0.0886 <= x11 <= -0.0177 x4 + 0.0177 x6 + 0.0886
+          x11.lb = 0 x0 - 0.0354 x1 - 0.0354 x2 + 0.0886
+          x11.ub = 0 x0 - 0.0354 x1 - 0.0354 x2 + 0.0886
+          x11 range: [ 0.0180, 0.0180 ]
+
+          Layer 3:
+
+          x13 = x8 + x10 + x12
+          => x13 = ( 0.1155 - 0.1017 - 0.0138 ) x3 + ( -0.1017 + 0.1035 - 0.0019 ) x5 +
+             ( -0.0138 - 0.0019 + 0.0156 ) x7 + ( 0.6084 + 0.3170 + 0.0747 )
+          => x13 = 0 x3 - 0.0001 x5 - 0.0001 x7 + 1.0001
+          => ( Up to rounding ) 1 <= x13 <= 1.
+          x13.lb = 1
+          x13.ub = 1
+          x13 range: [ 1, 1 ]
+
+          x14 = - x8 - x10 - x12
+          => x14 = - ( 0.1155 - 0.1017 - 0.0138 ) x3 - ( -0.1017 + 0.1035 - 0.0019 ) x5 -
+             ( -0.0138 - 0.0019 + 0.0156 ) x7 - ( 0.6084 + 0.3170 + 0.0747 )
+          => x14 = 0 x3 + 0.0001 x5 + 0.0001 x7 - 1.0001
+          => ( Up to rounding ) -1 <= x14 <= -1.
+          x14.lb = -1
+          x14.ub = -1
+          x14 range: [ -1, -1 ]
+
+          x15 = x9 + x11
+          => x15 = ( 0.0177 - 0.0177 ) x4 + ( -0.0177 + 0.0177 ) x6 + ( 0.9114 + 0.0886 )
+          => x15 = 0 x4 + 0 x6 + 1
+          => ( Up to rounding ) 1 <= x15 <= 1.
+          x15.lb = 1
+          x15.ub = 1
+          x15 range: [ 1, 1 ]
+
+          x16 = - x9 - x11
+          => x16 = - ( 0.0177 - 0.0177 ) x4 - ( -0.0177 + 0.0177 ) x6 - ( 0.9114 + 0.0886 )
+          => x16 = 0 x4 + 0 x6 - 1
+          => ( Up to rounding ) -1 <= x16 <= -1.
+          x16.lb = -1
+          x16.ub = -1
+          x16 range: [ -1, -1 ]
+        */
+
+        List<Tightening> expectedBounds( {
+            Tightening( 3, 2, Tightening::LB ),         Tightening( 3, 2, Tightening::UB ),
+            Tightening( 4, 3, Tightening::LB ),         Tightening( 4, 3, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ),         Tightening( 5, 0, Tightening::UB ),
+            Tightening( 6, -1, Tightening::LB ),        Tightening( 6, -1, Tightening::UB ),
+            Tightening( 7, -2, Tightening::LB ),        Tightening( 7, -2, Tightening::UB ),
+            Tightening( 8, 0.86681, Tightening::LB ),   Tightening( 8, 0.86682, Tightening::UB ),
+            Tightening( 9, 0.98201, Tightening::LB ),   Tightening( 9, 0.98201, Tightening::UB ),
+            Tightening( 10, 0.11731, Tightening::LB ),  Tightening( 10, 0.11731, Tightening::UB ),
+            Tightening( 11, 0.017985, Tightening::LB ), Tightening( 11, 0.017986, Tightening::UB ),
+            Tightening( 12, 0.015875, Tightening::LB ), Tightening( 12, 0.015876, Tightening::UB ),
+            Tightening( 13, 1, Tightening::LB ),        Tightening( 13, 1, Tightening::UB ),
+            Tightening( 14, -1, Tightening::LB ),       Tightening( 14, -1, Tightening::UB ),
+            Tightening( 15, 1, Tightening::LB ),        Tightening( 15, 1, Tightening::UB ),
+            Tightening( 16, -1, Tightening::LB ),       Tightening( 16, -1, Tightening::UB ),
+        } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+    }
+
+    void test_parameterised_deeppoly_bilinear()
+    {
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkDeepPolyBilinear( nlr, tableau );
+
+        tableau.setLowerBound( 0, 1 );
+        tableau.setUpperBound( 0, 2 );
+        tableau.setLowerBound( 1, -2 );
+        tableau.setUpperBound( 1, 1 );
+
+        unsigned paramCount = nlr.getNumberOfParameters();
+        Vector<double> coeffs( paramCount, 0.5 );
+
+        // Invoke Parameterised DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( false, coeffs ) );
+
+        /*
+          Input ranges:
+
+          x0: [1, 2]
+          x1: [-2, 1]
+
+          Layers 1, 2:
+
+          x2 = x0 - 2x1
+          x2.lb = x0 - 2x1 : [-1, 6]
+          x2.ub = x0 - 2x1 : [-1, 6]
+
+          x3 = x0 + x1
+          x3.lb = x0 + x1 : [-1, 3]
+          x3.ub = x0 + x1 : [-1, 3]
+
+          Using custom coefficients with alpha = { 0.5, 0.5 }.
+          Coefficients for the bilinear layer:
+
+          Lower bound:
+          alpha_l = 0.5 x3.lb + ( 1 - 0.5 ) x3.ub = 0.5 * -1 + 0.5 * 3 = 1
+          beta_l = 0.5 x2.lb + ( 1 - 0.5 ) x2.ub = 0.5 * -1 + 0.5 * 6 = 2.5
+          gamma_l = -0.5 x2.lb x3.lb - ( 1 - 0.5 ) x2.ub x3.ub = -0.5 * -1 * -1 - 0.5 * 6 * 3 =
+          -9.5.
+
+          Upper bound:
+          alpha_u = 0.5 x3.ub + ( 1 - 0.5 ) x3.lb = 0.5 * 3 + 0.5 * -1 = 1
+          beta_u = 0.5 x2.lb + ( 1 - 0.5 ) x2.ub = 0.5 * -1 + 0.5 * 6 = 2.5
+          gamma_u = -0.5 x2.lb x3.ub - ( 1 - 0.5 ) x2.ub x3.lb = -0.5 * -1 * 3 - 0.5 * 6 * -1
+          = 4.5.
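+
+          As a corner check, the resulting bounds x2 + 2.5 x3 - 9.5 <= x2 x3 <= x2 + 2.5 x3 + 4.5
+          hold at all four corners of [-1, 6] x [-1, 3]: at ( -1, -1 ) the product is 1 and the
+          bounds give [-13, 1]; at ( 6, -1 ): product -6, bounds [-6, 8]; at ( -1, 3 ): product
+          -3, bounds [-3, 11]; at ( 6, 3 ): product 18, bounds [4, 18]. Since the product minus
+          an affine function is bilinear, validity at the corners implies validity on the box.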
+
+          S = { x2.lb x3.lb, x2.ub x3.lb, x2.lb x3.ub, x2.ub x3.ub } = { 1, -6, -3, 18 }
+          min S = -6 <= x4 <= max S = 18
+          x2 + 2.5 x3 - 9.5 <= x4 <= x2 + 2.5 x3 + 4.5
+          x4.lb = 1 ( x0 - 2x1 ) + 2.5 ( x0 + x1 ) - 9.5 = 3.5 x0 + 0.5 x1 - 9.5 : [-7, -2]
+          x4.ub = 1 ( x0 - 2x1 ) + 2.5 ( x0 + x1 ) + 4.5 = 3.5 x0 + 0.5 x1 + 4.5 : [7, 12]
+          x4 range: [-6, 18]
+
+          Layer 3:
+
+          x5 = -x4 : [-18, 6]
+          => -x2 - 2.5 x3 - 4.5 <= x5 <= -x2 - 2.5 x3 + 9.5
+          x5.lb = -1 ( 3.5 x0 + 0.5 x1 + 4.5 ) = -3.5 x0 - 0.5 x1 - 4.5 : [-12, -7]
+          x5.ub = -1 ( 3.5 x0 + 0.5 x1 - 9.5 ) = -3.5 x0 - 0.5 x1 + 9.5 : [2, 7]
+          x5 range: [-12, 6]
+        */
+
+        List<Tightening> expectedBounds( { Tightening( 2, -1, Tightening::LB ),
+                                           Tightening( 2, 6, Tightening::UB ),
+                                           Tightening( 3, -1, Tightening::LB ),
+                                           Tightening( 3, 3, Tightening::UB ),
+                                           Tightening( 4, -6, Tightening::LB ),
+                                           Tightening( 4, 18, Tightening::UB ),
+                                           Tightening( 5, -12, Tightening::LB ),
+                                           Tightening( 5, 6, Tightening::UB ) } );
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+    }
+
+    bool existsBounds( const List<Tightening> &bounds, Tightening bound )
+    {
+        for ( const auto &b : bounds )
+        {
+            if ( b._type == bound._type && b._variable == bound._variable )
+            {
+                if ( FloatUtils::areEqual( b._value, bound._value ) )
+                    return true;
+            }
+        }
+        return false;
+    }
+
+    bool boundsEqual( const List<Tightening> &bounds, const List<Tightening> &expectedBounds )
+    {
+        if ( bounds.size() != expectedBounds.size() )
+            return false;
+
+        bool allFound = true;
+        for ( const auto &bound : bounds )
+        {
+            bool currentFound = false;
+            for ( const auto &expectedBound : expectedBounds )
+            {
+                currentFound |=
+                    ( bound._type == expectedBound._type &&
+                      bound._variable == expectedBound._variable &&
+                      FloatUtils::areEqual( bound._value, expectedBound._value, 0.0001 ) );
+            }
+            allFound &= currentFound;
+        }
+        return allFound;
+    }
+
+    void updateTableau( MockTableau &tableau, List<Tightening> &tightenings )
+    {
+        for ( const auto &tightening : tightenings )
+        {
+            if ( tightening._type == Tightening::LB )
+            {
+                tableau.setLowerBound( tightening._variable, tightening._value );
+            }
+
+            if ( tightening._type == Tightening::UB )
+            {
+                tableau.setUpperBound( tightening._variable, tightening._value );
+            }
+        }
+    }
+
+    bool compareVectors( const Vector<double> &vectorA, const Vector<double> &vectorB )
+    {
+        if ( vectorA.size() != vectorB.size() )
+            return false;
+
+        for ( unsigned i = 0; i < vectorA.size(); ++i )
+        {
+            if ( !FloatUtils::areEqual( vectorA[i], vectorB[i], 0.0001 ) )
+                return false;
+        }
+
+        return true;
+    }
+};
diff --git a/src/nlr/tests/Test_LPRelaxation.h b/src/nlr/tests/Test_LPRelaxation.h
index 49dbd06135..6aafe080ec 100644
--- a/src/nlr/tests/Test_LPRelaxation.h
+++ b/src/nlr/tests/Test_LPRelaxation.h
@@ -2,7 +2,7 @@
 /*! \file Test_LPRelaxation.h
  ** \verbatim
  ** Top contributors (to current version):
- ** Guy Katz, Andrew Wu
+ ** Guy Katz, Andrew Wu, Ido Shmuel
  ** This file is part of the Marabou project.
  ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS
  ** in the top-level source directory) and their institutional affiliations.
@@ -1760,7 +1760,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setBias( 1, 0, 1 ); nlr.setBias( 3, 1, 2 ); - // Mark the Softmax/Max sources + // Mark the Softmax sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 0, 2, 1 ); nlr.addActivationSource( 1, 0, 2, 2 ); @@ -1771,6 +1771,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addActivationSource( 1, 2, 2, 1 ); nlr.addActivationSource( 1, 2, 2, 2 ); + // Mark the Max sources nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 0 ); @@ -1819,7 +1820,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 11, large ); } - void populateNetworkBackwardSoftmaxAndMax2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { @@ -1885,7 +1885,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setBias( 3, 0, 2 ); nlr.setBias( 3, 2, -2 ); - // Mark the Softmax/Max sources + // Mark the Softmax sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 0 ); nlr.addActivationSource( 1, 2, 2, 0 ); @@ -1913,6 +1913,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addActivationSource( 3, 1, 4, 2 ); nlr.addActivationSource( 3, 2, 4, 2 ); + // Mark the Max sources nlr.addActivationSource( 4, 0, 5, 0 ); nlr.addActivationSource( 4, 1, 5, 0 ); nlr.addActivationSource( 4, 2, 5, 0 ); @@ -2019,11 +2020,12 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setBias( 1, 0, 1 ); nlr.setBias( 3, 1, 2 ); - // Mark the ReLU/Bilinear sources + // Mark the ReLU sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 1 ); nlr.addActivationSource( 1, 2, 2, 2 ); + // Mark the Bilinear sources nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 0 ); @@ -2136,7 +2138,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setBias( 3, 0, 2 ); - // Mark the ReLU/Bilinear sources + // Mark the ReLU sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 1 ); nlr.addActivationSource( 1, 2, 2, 2 ); @@ -2145,6 +2147,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 1 ); + // Mark the Bilinear sources nlr.addActivationSource( 4, 0, 5, 0 ); nlr.addActivationSource( 4, 1, 5, 0 ); @@ -2244,7 +2247,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -2257,7 +2259,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -2286,7 +2287,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -2311,7 +2311,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( 
bounds, expectedBounds3 ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -2344,7 +2343,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 2, -1 ); tableau.setUpperBound( 2, 1 ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -2381,7 +2379,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -2398,7 +2395,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -2447,7 +2443,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 21, -large ); tableau.setUpperBound( 21, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -2483,7 +2478,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -2519,7 +2513,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 1, 0 ); tableau.setUpperBound( 1, 1 ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -2545,7 +2538,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -2556,7 +2548,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -2585,7 +2576,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -2610,7 +2600,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -2640,7 +2629,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 2, -1 ); tableau.setUpperBound( 2, 1 ); - // Invoke DeepPoly 
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -2677,7 +2665,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -2688,7 +2675,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -2737,7 +2723,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 21, -large ); tableau.setUpperBound( 21, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -2773,7 +2758,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -2801,7 +2785,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 1, 0 ); tableau.setUpperBound( 1, 1 ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -2827,7 +2810,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -2838,7 +2820,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -2867,7 +2848,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -2892,7 +2872,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -2922,7 +2901,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 2, -1 ); tableau.setUpperBound( 2, 1 ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -2959,7 +2937,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke backward LP propagation 
TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -2970,7 +2947,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -3019,7 +2995,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 21, -large ); tableau.setUpperBound( 21, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -3055,7 +3030,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -3083,7 +3057,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 1, 0 ); tableau.setUpperBound( 1, 1 ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -3109,7 +3082,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -3123,7 +3095,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -3152,7 +3123,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -3177,7 +3147,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -3213,7 +3182,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 2, -1 ); tableau.setUpperBound( 2, 1 ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -3250,7 +3218,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -3261,7 +3228,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds 
tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -3310,7 +3276,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 21, -large ); tableau.setUpperBound( 21, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -3346,7 +3311,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -3374,7 +3338,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 1, 0 ); tableau.setUpperBound( 1, 1 ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -3400,7 +3363,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -3411,7 +3373,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -3440,7 +3401,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -3465,7 +3425,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -3495,7 +3454,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 2, -1 ); tableau.setUpperBound( 2, 1 ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -3532,7 +3490,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -3543,7 +3500,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -3592,7 +3548,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 21, -large ); tableau.setUpperBound( 21, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -3628,7 +3583,6 @@ 
class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -3656,7 +3610,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 1, 0 ); tableau.setUpperBound( 1, 1 ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -3682,7 +3635,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -3698,7 +3650,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -3727,7 +3678,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -3752,7 +3702,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -3788,7 +3737,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 2, -1 ); tableau.setUpperBound( 2, 1 ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -3825,7 +3773,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -3848,7 +3795,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -3897,7 +3843,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 21, -large ); tableau.setUpperBound( 21, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -3933,7 +3878,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -3974,8 +3918,7 @@ class 
NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 1, 0 ); tableau.setUpperBound( 1, 1 ); - - // Invoke SBT + // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -4000,7 +3943,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -4011,7 +3953,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -4040,7 +3981,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -4065,7 +4005,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -4096,7 +4035,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 2, -1 ); tableau.setUpperBound( 2, 1 ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -4127,7 +4065,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -4138,7 +4075,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -4179,7 +4115,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 17, -large ); tableau.setUpperBound( 17, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -4209,7 +4144,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -4237,7 +4171,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 1, 0 ); tableau.setUpperBound( 1, 1 ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -4263,7 +4196,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 
TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); @@ -4273,7 +4205,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -4302,7 +4233,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -4327,7 +4257,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -4359,7 +4288,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 2, -1 ); tableau.setUpperBound( 2, 1 ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -4388,7 +4316,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -4402,7 +4329,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -4439,7 +4365,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 15, -large ); tableau.setUpperBound( 15, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -4467,7 +4392,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -4500,7 +4424,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 1, 0 ); tableau.setUpperBound( 1, 1 ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -4526,7 +4449,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke PreimageApproximation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -4543,7 +4465,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite bounds = 
removeRedundancies( bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -4572,7 +4493,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -4597,7 +4517,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - // Invoke PreimageApproximation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -4633,7 +4552,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 2, -1 ); tableau.setUpperBound( 2, 1 ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -4670,7 +4588,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke PreimageApproximation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -4700,7 +4617,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite bounds = removeRedundancies( bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -4749,7 +4665,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 21, -large ); tableau.setUpperBound( 21, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -4785,7 +4700,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - // Invoke PreimageApproximation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -4836,7 +4750,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 1, 0 ); tableau.setUpperBound( 1, 1 ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -4862,7 +4775,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke PreimageApproximation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -4886,7 +4798,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite bounds = removeRedundancies( bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -4915,7 +4826,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() 
);
         TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );

@@ -4940,7 +4850,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );

-        // Invoke PreimageApproximation
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
@@ -4989,7 +4898,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         tableau.setLowerBound( 2, -1 );
         tableau.setUpperBound( 2, 1 );

-        // Invoke DeepPoly
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
@@ -5026,7 +4934,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds ) );

-        // Invoke PreimageApproximation
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
@@ -5057,7 +4964,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         bounds = removeRedundancies( bounds, newBounds );
         TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );

-        // Change the current bounds
         tableau.setLowerBound( 0, -3 );
         tableau.setUpperBound( 0, 1 );
@@ -5106,7 +5012,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         tableau.setLowerBound( 21, -large );
         tableau.setUpperBound( 21, large );

-        // Invoke DeepPoly
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
@@ -5142,7 +5047,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );

-        // Invoke PreimageApproximation
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
@@ -5190,7 +5094,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         tableau.setLowerBound( 1, 0 );
         tableau.setUpperBound( 1, 1 );

-        // Invoke DeepPoly
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
@@ -5216,7 +5119,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds ) );

-        // Invoke PreimageApproximation
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
@@ -5228,7 +5130,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         bounds = removeRedundancies( bounds, newBounds );
         TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );

-        // Change the current bounds
         tableau.setLowerBound( 0, -3 );
         tableau.setUpperBound( 0, 1 );
@@ -5257,7 +5158,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         tableau.setLowerBound( 11, -large );
         tableau.setUpperBound( 11, large );

-        // Invoke DeepPoly
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
@@ -5282,7 +5182,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );

-        // Invoke PreimageApproximation
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
@@ -5313,7 +5212,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         tableau.setLowerBound( 2, -1 );
         tableau.setUpperBound( 2, 1 );

-        // Invoke DeepPoly
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
@@ -5350,7 +5248,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds ) );

-        // Invoke PreimageApproximation
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
@@ -5362,7 +5259,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         bounds = removeRedundancies( bounds, newBounds );
         TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );

-        // Change the current bounds
         tableau.setLowerBound( 0, -3 );
         tableau.setUpperBound( 0, 1 );
@@ -5411,7 +5307,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         tableau.setLowerBound( 21, -large );
         tableau.setUpperBound( 21, large );

-        // Invoke DeepPoly
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
@@ -5447,7 +5342,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );

-        // Invoke PreimageApproximation
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
@@ -5476,7 +5370,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         tableau.setLowerBound( 1, 0 );
         tableau.setUpperBound( 1, 1 );

-        // Invoke DeepPoly
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
@@ -5502,22 +5395,20 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds ) );

-        // Invoke PreimageApproximation
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );

         List<Tightening> expectedBounds2( {
-            Tightening( 10, 8.7411, Tightening::UB ),
-            Tightening( 11, -2.4149, Tightening::LB ),
+            Tightening( 10, 8.361, Tightening::UB ),
+            Tightening( 11, -8.361, Tightening::LB ),
         } );

         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
         bounds = removeRedundancies( bounds, newBounds );
         TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );

-        // Change the current bounds
         tableau.setLowerBound( 0, -3 );
         tableau.setUpperBound( 0, 1 );
@@ -5546,7 +5437,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         tableau.setLowerBound( 11, -large );
         tableau.setUpperBound( 11, large );

-        // Invoke DeepPoly
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
@@ -5571,7 +5461,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );

-        // Invoke PreimageApproximation
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
@@ -5579,14 +5468,14 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         List<Tightening> expectedBounds4( {
             Tightening( 7, 0, Tightening::LB ),
-            Tightening( 8, 7, Tightening::UB ),
+            Tightening( 9, 5.8235, Tightening::UB ),
             Tightening( 10, -14, Tightening::LB ),
             Tightening( 10, 40.7647, Tightening::UB ),
-            Tightening( 11, -24.8805, Tightening::LB ),
+            Tightening( 11, -40.7647, Tightening::LB ),
             Tightening( 11, 14, Tightening::UB ),
         } );
@@ -5613,7 +5502,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         tableau.setLowerBound( 2, -1 );
         tableau.setUpperBound( 2, 1 );

-        // Invoke DeepPoly
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
@@ -5642,7 +5530,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds ) );

-        // Invoke PreimageApproximation
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
@@ -5662,7 +5549,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         bounds = removeRedundancies( bounds, newBounds );
         TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );

-        // Change the current bounds
         tableau.setLowerBound( 0, -3 );
         tableau.setUpperBound( 0, 1 );
@@ -5699,7 +5585,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         tableau.setLowerBound( 15, -large );
         tableau.setUpperBound( 15, large );

-        // Invoke DeepPoly
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
@@ -5727,7 +5612,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );

-        // Invoke PreimageApproximation
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h
index f523a9e993..2e41d100f0 100644
--- a/src/nlr/tests/Test_NetworkLevelReasoner.h
+++ b/src/nlr/tests/Test_NetworkLevelReasoner.h
@@ -2,7 +2,7 @@
 /*! \file Test_NetworkLevelReasoner.h
  ** \verbatim
  ** Top contributors (to current version):
- ** Guy Katz, Andrew Wu
+ ** Guy Katz, Andrew Wu, Ido Shmuel
  ** This file is part of the Marabou project.
  ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS
  ** in the top-level source directory) and their institutional affiliations.
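For orientation before the next set of hunks: every hunk above edits instances of one recurring two-phase bound-tightening flow, which is why the deleted one-line comments were redundant. The sketch below condenses that flow; the function name is hypothetical, while updateTableau, removeRedundancies and boundsEqual are this suite's own helpers and the NLR calls are the ones used verbatim in the tests. It is a sketch of the pattern, not an excerpt of any single test.

    // Condensed sketch (assumed names: sketchDeepPolyThenPreimageApprox is hypothetical)
    void sketchDeepPolyThenPreimageApprox( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
    {
        // Phase 1: pull the tableau's current variable bounds into the NLR,
        // run DeepPoly, and collect the resulting tightenings.
        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
        List<Tightening> bounds;
        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );

        // Phase 2: commit the DeepPoly tightenings to the tableau, then run the
        // LP-relaxation-based PreimageApproximation pass on top of them.
        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );

        // Keep only the tightenings that improve on the DeepPoly results.
        List<Tightening> newBounds;
        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
        bounds = removeRedundancies( bounds, newBounds );
    }

In the actual tests, each phase ends with a boundsEqual( bounds, expectedBoundsN ) assertion against a hand-computed Tightening list, and the whole flow is repeated after changing the input bounds on the tableau.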
@@ -479,7 +479,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 );
     }

-
     void populateNetworkWithMax( NLR::NetworkLevelReasoner &nlr )
     {
         /*
@@ -740,11 +739,12 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         nlr.setBias( 1, 0, 1 );
         nlr.setBias( 3, 1, 2 );

-        // Mark the Round/Sign sources
+        // Mark the Abs sources
         nlr.addActivationSource( 1, 0, 2, 0 );
         nlr.addActivationSource( 1, 1, 2, 1 );
         nlr.addActivationSource( 1, 2, 2, 2 );

+        // Mark the ReLU sources
         nlr.addActivationSource( 3, 0, 4, 0 );
         nlr.addActivationSource( 3, 1, 4, 1 );
@@ -812,11 +812,12 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         nlr.setBias( 1, 0, 1 );
         nlr.setBias( 3, 1, 2 );

-        // Mark the Round/Sign sources
+        // Mark the Round sources
         nlr.addActivationSource( 1, 0, 2, 0 );
         nlr.addActivationSource( 1, 1, 2, 1 );
         nlr.addActivationSource( 1, 2, 2, 2 );

+        // Mark the Sign sources
         nlr.addActivationSource( 3, 0, 4, 0 );
         nlr.addActivationSource( 3, 1, 4, 1 );
@@ -886,11 +887,12 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         nlr.setBias( 1, 0, 1 );
         nlr.setBias( 3, 1, 2 );

-        // Mark the LeakyReLU/Sigmoid sources
+        // Mark the LeakyReLU sources
         nlr.addActivationSource( 1, 0, 2, 0 );
         nlr.addActivationSource( 1, 1, 2, 1 );
         nlr.addActivationSource( 1, 2, 2, 2 );

+        // Mark the Sigmoid sources
         nlr.addActivationSource( 3, 0, 4, 0 );
         nlr.addActivationSource( 3, 1, 4, 1 );
@@ -956,7 +958,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         nlr.setBias( 1, 0, 1 );
         nlr.setBias( 3, 1, 2 );

-        // Mark the Softmax/Max sources
+        // Mark the Softmax sources
         nlr.addActivationSource( 1, 0, 2, 0 );
         nlr.addActivationSource( 1, 0, 2, 1 );
         nlr.addActivationSource( 1, 0, 2, 2 );
@@ -967,6 +969,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         nlr.addActivationSource( 1, 2, 2, 1 );
         nlr.addActivationSource( 1, 2, 2, 2 );

+        // Mark the Max sources
         nlr.addActivationSource( 3, 0, 4, 0 );
         nlr.addActivationSource( 3, 1, 4, 0 );
@@ -1030,11 +1033,12 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         nlr.setBias( 1, 0, 1 );
         nlr.setBias( 3, 1, 2 );

-        // Mark the ReLU/Bilinear sources
+        // Mark the ReLU sources
         nlr.addActivationSource( 1, 0, 2, 0 );
         nlr.addActivationSource( 1, 1, 2, 1 );
         nlr.addActivationSource( 1, 2, 2, 2 );

+        // Mark the Bilinear sources
         nlr.addActivationSource( 3, 0, 4, 0 );
         nlr.addActivationSource( 3, 1, 4, 0 );
@@ -1158,7 +1162,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         nlr.setWeight( 0, 0, 3, 0, -1 );
         nlr.setWeight( 1, 0, 5, 0, 3 );

-
         nlr.setBias( 3, 0, 1 );
         nlr.setBias( 5, 0, 1 );
@@ -1626,7 +1629,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
     {
         /*
-
           x0      x3    S    x6

          x1      x4    S    x7
@@ -1675,2266 +1677,37 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         nlr.setBias( 1, 1, 2 );
         nlr.setBias( 1, 2, 3 );

+        // Mark the Softmax sources
         nlr.addActivationSource( 1, 0, 2, 0 );
-        nlr.addActivationSource( 1, 1, 2, 0 );
-        nlr.addActivationSource( 1, 2, 2, 0 );
-        nlr.addActivationSource( 1, 0, 2, 1 );
-        nlr.addActivationSource( 1, 1, 2, 1 );
-        nlr.addActivationSource( 1, 2, 2, 1 );
-        nlr.addActivationSource( 1, 0, 2, 2 );
-        nlr.addActivationSource( 1, 1, 2, 2 );
-        nlr.addActivationSource( 1, 2, 2, 2 );
-
-
-        // Variable indexing
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 );
-
-        nlr.setNeuronVariable(
NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 8 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 11 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - } - - void populateNetworkSBTSoftmax2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - - x0 x3 S x8 - - x1 x4 S x9 - - x2 x5 S x10 - - x6 S x11 - - x7 S x12 - - x3 = x0 - x1 + x2 + 1 - x4 = -x0 + x1 + x2 + 2 - x5 = -x0 - x1 - x2 + 3 - x6 = -x0 - x1 - x2 + 2 - x7 = -x0 - x1 - x2 + 1 - - x8 x10 x12 = softmax(x3, x5, x7) - - x9 x11 = softmax(x4, x6) - - x13 = x8 + x10 + x12 - x14 = -x8 - x10 - x12 - x15 = x9 + x11 - x16 = -x9 - x11 - - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 5 ); - nlr.addLayer( 2, NLR::Layer::SOFTMAX, 5 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 4 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, -1 ); - nlr.setWeight( 0, 0, 1, 2, -1 ); - nlr.setWeight( 0, 0, 1, 3, -1 ); - nlr.setWeight( 0, 0, 1, 4, -1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 2, -1 ); - nlr.setWeight( 0, 1, 1, 3, -1 ); - nlr.setWeight( 0, 1, 1, 4, -1 ); - nlr.setWeight( 0, 2, 1, 0, 1 ); - nlr.setWeight( 0, 2, 1, 1, 1 ); - nlr.setWeight( 0, 2, 1, 2, -1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - nlr.setWeight( 0, 2, 1, 4, -1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 4, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - nlr.setWeight( 2, 4, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 2, 1 ); - nlr.setWeight( 2, 3, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 3, -1 ); - nlr.setWeight( 2, 3, 3, 3, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 1, 1, 2 ); - nlr.setBias( 1, 2, 3 ); - nlr.setBias( 1, 3, 2 ); - nlr.setBias( 1, 4, 1 ); - - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 0 ); - nlr.addActivationSource( 1, 4, 2, 0 ); - nlr.addActivationSource( 1, 0, 2, 2 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 4, 2, 2 ); - nlr.addActivationSource( 1, 0, 2, 4 ); - nlr.addActivationSource( 1, 2, 2, 4 ); - nlr.addActivationSource( 1, 4, 2, 4 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 3, 2, 1 ); - nlr.addActivationSource( 1, 1, 2, 3 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - // Variable 
indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 4 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 4 ), 12 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 13 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 3 ), 16 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 17 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - } - - void populateNetworkSBTBilinear( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - - x0 x2 - x x4 -- x5 - x1 x3 - - x2 = x0 - 2 * x1 - x3 = x0 + x1 - x4 = -x5 - - x4 = x2 * x3 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::BILINEAR, 1 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -2 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, -1 ); - - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 5 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 6 ); - tableau.setLowerBound( 2, -large ); - 
tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - } - - void populateNetworkBackwardReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - 1 R -1 R -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ R / \ R / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 2 of - https://dl.acm.org/doi/10.1145/3563325 - using ReLU activation instead of LeakyReLU - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::RELU, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardReLU2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 
x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = ReLU( x3 ) - x8 = ReLU( x4 ) - x9 = ReLU( x5 ) - x10 = ReLU( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = ReLU( x11 ) - x15 = ReLU( x12 ) - x16 = ReLU( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = ReLU( x17 ) - x20 = ReLU( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::RELU, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::RELU, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::RELU, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - 
nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardSigmoid( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - 1 S -1 S -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ S / \ S / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 
2 of - https://dl.acm.org/doi/10.1145/3563325 - using Sigmoid activation instead of LeakyReLU - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::SIGMOID, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the Sigmoid sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardSigmoid2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = Sigmoid( x3 ) - x8 = Sigmoid( x4 ) - x9 = Sigmoid( x5 ) - x10 = Sigmoid( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = Sigmoid( x11 ) - x15 = Sigmoid( x12 ) - x16 = Sigmoid( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = Sigmoid( x17 ) - x20 = Sigmoid( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, 
NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::SIGMOID, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::SIGMOID, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::SIGMOID, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the Sigmoid sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - 
tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardSign( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - 1 Sign -1 Sign -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ Sign / \ Sign / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 2 of - https://dl.acm.org/doi/10.1145/3563325 - using Sign activation instead of LeakyReLU - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - 
double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardSign2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = Sign( x3 ) - x8 = Sign( x4 ) - x9 = SIgn( x5 ) - x10 = Sign( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = Sign( x11 ) - x15 = Sign( x12 ) - x16 = Sign( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = Sign( x17 ) - x20 = Sign( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::SIGN, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::SIGN, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 
1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardRound( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 Rnd -1 Rnd -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ Rnd / \ Rnd / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 
2 of - https://dl.acm.org/doi/10.1145/3563325 - using Round activation instead of LeakyReLU - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ROUND, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the Round sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardRound2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = Round( x3 ) - x8 = Round( x4 ) - x9 = Round( x5 ) - x10 = Round( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = Round( x11 ) - x15 = Round( x12 ) - x16 = Round( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = Round( x17 ) - x20 = Round( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - 
nlr.addLayer( 2, NLR::Layer::ROUND, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::ROUND, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::ROUND, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the Round sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - 
tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardAbs( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 A -1 A -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ A / \ A / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 2 of - https://dl.acm.org/doi/10.1145/3563325 - using Absolute value activation instead of LeakyReLU - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; 
- - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardAbs2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = Abs( x3 ) - x8 = Abs( x4 ) - x9 = Abs( x5 ) - x10 = Abs( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = Abs( x11 ) - x15 = Abs( x12 ) - x16 = Abs( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = Abs( x17 ) - x20 = Abs( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); 
- nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardLeakyReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 LR -1 LR -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ LR / \ LR / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 
2 of - https://dl.acm.org/doi/10.1145/3563325 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - nlr.getLayer( 2 )->setAlpha( 0.1 ); - nlr.getLayer( 4 )->setAlpha( 0.1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the LeakyReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardLeakyRelu2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = LeakyReLU( x3 ) - x8 = LeakyReLU( x4 ) - x9 = LeakyReLU( x5 ) - x10 = LeakyReLU( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = LeakyReLU( x11 ) - x15 = LeakyReLU( x12 ) - x16 = LeakyReLU( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = LeakyReLU( x17 ) - x20 = LeakyReLU( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, 
NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::LEAKY_RELU, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - nlr.getLayer( 2 )->setAlpha( 0.1 ); - nlr.getLayer( 4 )->setAlpha( 0.1 ); - nlr.getLayer( 6 )->setAlpha( 0.1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the LeakyReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large 
); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr, - MockTableau &tableau ) - { - /* - a a' - x e - b b' f h - y d - c c' - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::MAX, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Softmax/Max sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 0, 2, 1 ); - nlr.addActivationSource( 1, 0, 2, 2 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 1, 2, 2 ); - nlr.addActivationSource( 1, 2, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( 
NLR::NeuronIndex( 4, 0 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - - void populateNetworkBackwardSoftmaxAndMax2( NLR::NetworkLevelReasoner &nlr, - MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 - x1 x12 x15 x17 - x5 x9 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7, x8, x9, x10 = Softmax( x2, x3, x4, x5 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14, x15, x16 = Softmax( x11, x12, x13 ) - - x17 = Max( x14, x15, x16 ) - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::SOFTMAX, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::SOFTMAX, 3 ); - nlr.addLayer( 5, NLR::Layer::MAX, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - - // Mark the Softmax/Max sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 0 ); - nlr.addActivationSource( 1, 3, 2, 0 ); - nlr.addActivationSource( 1, 0, 2, 1 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 3, 2, 1 ); - nlr.addActivationSource( 1, 0, 2, 2 ); - nlr.addActivationSource( 1, 1, 2, 2 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 2 ); - nlr.addActivationSource( 1, 0, 2, 3 ); - nlr.addActivationSource( 1, 1, 2, 3 ); - nlr.addActivationSource( 1, 2, 2, 3 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - nlr.addActivationSource( 3, 2, 4, 0 ); - nlr.addActivationSource( 3, 0, 4, 1 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - 
nlr.addActivationSource( 3, 2, 4, 1 ); - nlr.addActivationSource( 3, 0, 4, 2 ); - nlr.addActivationSource( 3, 1, 4, 2 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 4, 0, 5, 0 ); - nlr.addActivationSource( 4, 1, 5, 0 ); - nlr.addActivationSource( 4, 2, 5, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 18 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - } - - void populateNetworkBackwardReluAndBilinear( NLR::NetworkLevelReasoner &nlr, - MockTableau &tableau ) - { - /* - a a' - x e - b b' f h - y d - c c' - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - 
nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the ReLU/Bilinear sources - nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 2 ); nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - // Variable indexing nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); // Very loose bounds for neurons except inputs double large = 1000000; - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); + tableau.getBoundManager().initialize( 11 ); tableau.setLowerBound( 3, -large ); tableau.setUpperBound( 3, large ); tableau.setLowerBound( 4, -large ); @@ -3951,86 +1724,97 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 9, large ); tableau.setLowerBound( 10, -large ); tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); } - void populateNetworkBackwardReluAndBilinear2( NLR::NetworkLevelReasoner &nlr, - MockTableau &tableau ) + void populateNetworkSBTSoftmax2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* - x3 x7 - x0 - x4 x8 x11 x13 - x1 x15 - x5 x9 x12 x14 - x2 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = ReLU( x3 ) - x8 = ReLU( x4 ) - x9 = ReLU( x5 ) - x10 = ReLU( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - - x13 = ReLU( x11 ) - x14 = ReLU( x12 ) - - x15 = x13 * x14 + + x0 x3 S x8 + + x1 x4 S x9 + + x2 x5 S x10 + + x6 S x11 + + x7 S x12 + + x3 = x0 - x1 + x2 + 1 + x4 = -x0 + x1 + x2 + 2 + x5 = -x0 - x1 - x2 + 3 + x6 = -x0 - x1 - x2 + 2 + x7 = -x0 - x1 - x2 + 1 + + x8 x10 x12 = softmax(x3, x5, x7) + + x9 x11 = softmax(x4, x6) + + x13 = x8 + x10 + x12 + x14 = -x8 - x10 - x12 + x15 = x9 + x11 + x16 = -x9 - x11 + */ // Create the layers nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::RELU, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::BILINEAR, 1 ); 
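        // Editorial sketch (not part of the original patch): the SOFTMAX layer
        // built below holds two disjoint softmax groups, and group membership is
        // encoded purely through addActivationSource(): every output neuron of a
        // group lists all of that group's inputs as its sources. A hypothetical
        // helper (addSoftmaxGroup is not a Marabou API) that would produce the
        // same wiring as the calls below:
        //
        //     auto addSoftmaxGroup = [&]( const Vector<unsigned> &sources,
        //                                 const Vector<unsigned> &outputs ) {
        //         for ( unsigned out : outputs )
        //             for ( unsigned src : sources )
        //                 nlr.addActivationSource( 1, src, 2, out );
        //     };
        //     addSoftmaxGroup( { 0, 2, 4 }, { 0, 2, 4 } ); // x8, x10, x12 = softmax( x3, x5, x7 )
        //     addSoftmaxGroup( { 1, 3 }, { 1, 3 } );       // x9, x11 = softmax( x4, x6 )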
+ nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 5 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 5 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 4 ); // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) + for ( unsigned i = 1; i <= 3; ++i ) nlr.addLayerDependency( i - 1, i ); // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, -1 ); + nlr.setWeight( 0, 0, 1, 2, -1 ); + nlr.setWeight( 0, 0, 1, 3, -1 ); + nlr.setWeight( 0, 0, 1, 4, -1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 2, -1 ); + nlr.setWeight( 0, 1, 1, 3, -1 ); + nlr.setWeight( 0, 1, 1, 4, -1 ); + nlr.setWeight( 0, 2, 1, 0, 1 ); + nlr.setWeight( 0, 2, 1, 1, 1 ); + nlr.setWeight( 0, 2, 1, 2, -1 ); nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 0, 2, 1, 4, -1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); + nlr.setWeight( 2, 4, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + nlr.setWeight( 2, 4, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 2, 1 ); + nlr.setWeight( 2, 3, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 3, -1 ); + nlr.setWeight( 2, 3, 3, 3, -1 ); - nlr.setBias( 3, 0, 2 ); + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 1, 1, 2 ); + nlr.setBias( 1, 2, 3 ); + nlr.setBias( 1, 3, 2 ); + nlr.setBias( 1, 4, 1 ); - // Mark the ReLU/Bilinear sources + // Mark the Softmax sources nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 4, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 2 ); nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 4, 2, 2 ); + nlr.addActivationSource( 1, 0, 2, 4 ); + nlr.addActivationSource( 1, 2, 2, 4 ); + nlr.addActivationSource( 1, 4, 2, 4 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 3, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 3 ); nlr.addActivationSource( 1, 3, 2, 3 ); - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - nlr.addActivationSource( 4, 0, 5, 0 ); - nlr.addActivationSource( 4, 1, 5, 0 ); - // Variable indexing nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); @@ -4040,24 +1824,23 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 4 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 13 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 14 ); + 
nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 8 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 9 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 10 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 11 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 4 ), 12 );
-        nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 15 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 13 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 14 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 15 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 3 ), 16 );

         // Very loose bounds for neurons except inputs
         double large = 1000000;

-        tableau.getBoundManager().initialize( 16 );
+        tableau.getBoundManager().initialize( 17 );
         tableau.setLowerBound( 3, -large );
         tableau.setUpperBound( 3, large );
         tableau.setLowerBound( 4, -large );
@@ -4084,6 +1867,69 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         tableau.setUpperBound( 14, large );
         tableau.setLowerBound( 15, -large );
         tableau.setUpperBound( 15, large );
+        tableau.setLowerBound( 16, -large );
+        tableau.setUpperBound( 16, large );
+    }
+
+    void populateNetworkSBTBilinear( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
+    {
+        /*
+
+          x0    x2
+             x      x4 -- x5
+          x1    x3
+
+          x2 = x0 - 2 * x1
+          x3 = x0 + x1
+          x5 = -x4
+
+          x4 = x2 * x3
+        */
+
+        // Create the layers
+        nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
+        nlr.addLayer( 2, NLR::Layer::BILINEAR, 1 );
+        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );
+
+        // Mark layer dependencies
+        for ( unsigned i = 1; i <= 3; ++i )
+            nlr.addLayerDependency( i - 1, i );
+
+        // Set the weights and biases for the weighted sum layers
+        nlr.setWeight( 0, 0, 1, 0, 1 );
+        nlr.setWeight( 0, 0, 1, 1, 1 );
+        nlr.setWeight( 0, 1, 1, 0, -2 );
+        nlr.setWeight( 0, 1, 1, 1, 1 );
+        nlr.setWeight( 2, 0, 3, 0, -1 );
+
+        // Mark the Bilinear sources
+        nlr.addActivationSource( 1, 0, 2, 0 );
+        nlr.addActivationSource( 1, 1, 2, 0 );
+
+        // Variable indexing
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 5 );
+
+        // Very loose bounds for neurons except inputs
+        double large = 1000000;
+
+        tableau.getBoundManager().initialize( 6 );
+        tableau.setLowerBound( 2, -large );
+        tableau.setUpperBound( 2, large );
+        tableau.setLowerBound( 3, -large );
+        tableau.setUpperBound( 3, large );
+        tableau.setLowerBound( 4, -large );
+        tableau.setUpperBound( 4, large );
+        tableau.setLowerBound( 5, -large );
+        tableau.setUpperBound( 5, large );
     }

     void test_evaluate_relu()
@@ -4342,7 +2188,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         TS_ASSERT( FloatUtils::areEqual( output[0], -5 ) );
     }
-
     void test_evaluate_softmax()
     {
         NLR::NetworkLevelReasoner nlr;
@@ -4436,23 +2281,24 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         nlr.setWeight( 0, 1, 1, 1, -3 );
         nlr.setWeight( 0, 1, 1, 2, 1 );

-        nlr.addActivationSource( 1, 0, 2, 0 );
-        nlr.addActivationSource( 1, 1, 2, 1 );
-        nlr.addActivationSource( 1, 2, 2, 2 );
-
         nlr.setWeight( 2, 0, 3, 0, 1 );
         nlr.setWeight( 2, 1, 3, 0, 2 );
         nlr.setWeight( 2, 2, 3, 1, -2 );
         nlr.setWeight( 0, 1, 3, 1, 1 );

-        nlr.addActivationSource( 3, 0, 4, 0 );
-        nlr.addActivationSource( 3, 1, 4, 1 );
-        nlr.addActivationSource( 0, 0, 4, 2 );
-
         nlr.setWeight(
4, 0, 5, 0, 1 ); nlr.setWeight( 4, 1, 5, 0, 1 ); nlr.setWeight( 4, 2, 5, 0, 1 ); + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + // Mark the ReLU sources + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 0, 0, 4, 2 ); // Evaluate double input[2]; double output; @@ -4993,11 +2839,12 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setBias( 5, 0, 0 ); nlr.setBias( 5, 1, 0 ); - // Mark the ReLU/Abs sources + // Mark the ReLU sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 1 ); nlr.addActivationSource( 1, 2, 2, 2 ); + // Mark the Abs sources nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 1 ); @@ -5697,23 +3544,25 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setWeight( 0, 1, 1, 1, -3 ); nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); nlr.setWeight( 2, 1, 3, 0, 2 ); nlr.setWeight( 2, 2, 3, 1, -2 ); nlr.setWeight( 0, 1, 3, 1, 1 ); - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 0, 0, 4, 2 ); - nlr.setWeight( 4, 0, 5, 0, 1 ); nlr.setWeight( 4, 1, 5, 0, 1 ); nlr.setWeight( 4, 2, 5, 0, 1 ); + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + // Mark the ReLU sources + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 0, 0, 4, 2 ); + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); // Simulate1 @@ -6155,7 +4004,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setBias( 1, 0, 1 ); nlr.setBias( 3, 1, 2 ); - // Mark the ReLU sources + // Mark the Abs sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 1 ); nlr.addActivationSource( 1, 2, 2, 2 ); @@ -6588,7 +4437,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite NLR::NetworkLevelReasoner nlr; populateNetworkWithRound( nlr ); - MockTableau tableau; tableau.getBoundManager().initialize( 14 ); @@ -6720,7 +4568,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite NLR::NetworkLevelReasoner nlr; populateNetworkWithSigmoids( nlr ); - MockTableau tableau; tableau.getBoundManager().initialize( 14 ); @@ -6852,7 +4699,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite NLR::NetworkLevelReasoner nlr; populateNetworkWithMax( nlr ); - MockTableau tableau; tableau.getBoundManager().initialize( 12 ); @@ -6973,7 +4819,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite NLR::NetworkLevelReasoner nlr; populateNetworkWithSoftmax( nlr ); - MockTableau tableau; tableau.getBoundManager().initialize( 14 ); @@ -7105,7 +4950,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite NLR::NetworkLevelReasoner nlr; populateNetworkWithBilinear( nlr ); - MockTableau tableau; tableau.getBoundManager().initialize( 12 ); @@ -7623,7 +5467,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite NLR::NetworkLevelReasoner nlr; populateNetworkWithSoftmaxAndMax( nlr ); - MockTableau tableau; tableau.getBoundManager().initialize( 12 ); @@ -8211,7 +6054,7 @@ class NetworkLevelReasonerTestSuite : 
public CxxTest::TestSuite x5.lb = 3 ( 0 ) + 3 ( x0 ) + 1 = 3x0 + 1 : [-2, 4] x5.ub = 3 ( -15/14 x0 + 20/14 ) + 3 ( x0 ) + 1 = -3/14 x0 + 74/14 : [71/14, 77/14 = 5.5] - x5 range: [-2, 4] + x5 range: [-2, 5.5] */ List expectedBounds( { @@ -9615,20 +7458,20 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite */ List expectedBounds( - { Tightening( 3, 2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, 3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, -1, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, -2, Tightening::UB ), - Tightening( 8, 0.8668, Tightening::LB ), Tightening( 8, 0.8668, Tightening::UB ), - Tightening( 9, 0.9820, Tightening::LB ), Tightening( 9, 0.9820, Tightening::UB ), - Tightening( 10, 0.1173, Tightening::LB ), Tightening( 10, 0.1173, Tightening::UB ), - Tightening( 11, 0.0179, Tightening::LB ), Tightening( 11, 0.0179, Tightening::UB ), - Tightening( 12, 0.0159, Tightening::LB ), Tightening( 12, 0.0159, Tightening::UB ), - Tightening( 13, 0.9470, Tightening::LB ), Tightening( 13, 0.9470, Tightening::UB ), - Tightening( 14, -0.9470, Tightening::LB ), Tightening( 14, -0.9470, Tightening::UB ), - Tightening( 15, 1.0253, Tightening::LB ), Tightening( 15, 1.0253, Tightening::UB ), - Tightening( 16, -1.0253, Tightening::LB ), Tightening( 16, -1.0253, Tightening::UB ) + { Tightening( 3, 2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, -1, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, -2, Tightening::UB ), + Tightening( 8, 0.8668, Tightening::LB ), Tightening( 8, 0.8668, Tightening::UB ), + Tightening( 9, 0.9820, Tightening::LB ), Tightening( 9, 0.9820, Tightening::UB ), + Tightening( 10, 0.1173, Tightening::LB ), Tightening( 10, 0.1173, Tightening::UB ), + Tightening( 11, 0.0179, Tightening::LB ), Tightening( 11, 0.0179, Tightening::UB ), + Tightening( 12, 0.0159, Tightening::LB ), Tightening( 12, 0.0159, Tightening::UB ), + Tightening( 13, 1, Tightening::LB ), Tightening( 13, 1, Tightening::UB ), + Tightening( 14, -1, Tightening::LB ), Tightening( 14, -1, Tightening::UB ), + Tightening( 15, 1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, -1, Tightening::UB ) } ); @@ -9637,53 +7480,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - void test_softmax_bounds_er() - { - Vector inputLb = { -1, 0, 1 }; - Vector inputUb = { 0, 2, 4 }; - Vector input = { -0.5, 1, 2.5 }; - - double value = NLR::Layer::ERLowerBound( input, inputLb, inputUb, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 0.0114799, 0.00001 ) ); - value = NLR::Layer::dERLowerBound( input, inputLb, inputUb, 0, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 0.00563867, 0.00001 ) ); - value = NLR::Layer::dERLowerBound( input, inputLb, inputUb, 0, 1 ); - TS_ASSERT( FloatUtils::areEqual( value, -0.000838421, 0.00001 ) ); - - - Vector outputLb = { 0.2, 0, 0 }; - Vector outputUb = { 0.4, 0.1, 0.1 }; - - value = NLR::Layer::ERUpperBound( input, outputLb, outputUb, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, -1.44538, 0.00001 ) ); - value = NLR::Layer::dERUpperBound( 
input, outputLb, outputUb, 0, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 1.96538, 0.00001 ) ); - value = NLR::Layer::dERUpperBound( input, outputLb, outputUb, 0, 1 ); - TS_ASSERT( FloatUtils::areEqual( value, -0.358535, 0.00001 ) ); - } - - void test_softmax_bounds_lse1() - { - Vector inputLb = { -1, 0, 1 }; - Vector inputUb = { 0, 2, 3 }; - Vector input = { -0.5, 1, 2 }; - double value = NLR::Layer::LSELowerBound( input, inputLb, inputUb, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) ); - value = NLR::Layer::dLSELowerBound( input, inputLb, inputUb, 0, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) ); - value = NLR::Layer::dLSELowerBound( input, inputLb, inputUb, 0, 1 ); - TS_ASSERT( FloatUtils::areEqual( value, -0.00703444, 0.001 ) ); - - Vector outputLb = { 0.2, 0, 0 }; - Vector outputUb = { 0.4, 0.1, 0.1 }; - value = NLR::Layer::LSEUpperBound( input, outputLb, outputUb, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, -0.164165, 0.00001 ) ); - value = NLR::Layer::dLSEUpperbound( input, outputLb, outputUb, 0, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 0.272204, 0.00001 ) ); - value = NLR::Layer::dLSEUpperbound( input, outputLb, outputUb, 0, 1 ); - TS_ASSERT( FloatUtils::areEqual( value, -0.073207, 0.00001 ) ); - } - void test_sbt_bilinear() { NLR::NetworkLevelReasoner nlr; @@ -10099,7 +7895,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x5.lb = 3 ( 0 ) + 3 ( x0 ) + 1 = 3x0 + 1 : [-2, 4] x5.ub = 3 ( -15/14 x0 + 20/14 ) + 3 ( x0 ) + 1 = -3/14 x0 + 74/14 : [71/14, 77/14 = 5.5] - x5 range: [-2, 4] + x5 range: [-2, 5.5] */ List expectedBounds( { @@ -11565,20 +9361,20 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite */ List expectedBounds( - { Tightening( 3, 2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, 3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, -1, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, -2, Tightening::UB ), - Tightening( 8, 0.8668, Tightening::LB ), Tightening( 8, 0.8668, Tightening::UB ), - Tightening( 9, 0.9820, Tightening::LB ), Tightening( 9, 0.9820, Tightening::UB ), - Tightening( 10, 0.1173, Tightening::LB ), Tightening( 10, 0.1173, Tightening::UB ), - Tightening( 11, 0.0179, Tightening::LB ), Tightening( 11, 0.0179, Tightening::UB ), - Tightening( 12, 0.0159, Tightening::LB ), Tightening( 12, 0.0159, Tightening::UB ), - Tightening( 13, 0.9470, Tightening::LB ), Tightening( 13, 0.9470, Tightening::UB ), - Tightening( 14, -0.9470, Tightening::LB ), Tightening( 14, -0.9470, Tightening::UB ), - Tightening( 15, 1.0253, Tightening::LB ), Tightening( 15, 1.0253, Tightening::UB ), - Tightening( 16, -1.0253, Tightening::LB ), Tightening( 16, -1.0253, Tightening::UB ) + { Tightening( 3, 2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, -1, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, -2, Tightening::UB ), + Tightening( 8, 0.8668, Tightening::LB ), Tightening( 8, 0.8668, Tightening::UB ), + Tightening( 9, 0.9820, Tightening::LB ), Tightening( 9, 0.9820, Tightening::UB ), + Tightening( 10, 0.1173, Tightening::LB ), Tightening( 10, 0.1173, Tightening::UB ), + Tightening( 11, 0.0179, 
Tightening::LB ), Tightening( 11, 0.0179, Tightening::UB ), + Tightening( 12, 0.0159, Tightening::LB ), Tightening( 12, 0.0159, Tightening::UB ), + Tightening( 13, 1, Tightening::LB ), Tightening( 13, 1, Tightening::UB ), + Tightening( 14, -1, Tightening::LB ), Tightening( 14, -1, Tightening::UB ), + Tightening( 15, 1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, -1, Tightening::UB ) } ); @@ -11707,7 +9503,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( assignment[13], 0 ) ); } - void test_obtain_bound_from_ipq() { NLR::NetworkLevelReasoner nlr; @@ -11716,7 +9511,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Query query; query.setNumberOfVariables( 14 ); - // Initialize the bounds query.setLowerBound( 0, -1 ); query.setUpperBound( 0, 1 ); diff --git a/src/nlr/tests/Test_PMNR.h b/src/nlr/tests/Test_PMNR.h index ce4e723758..5ca1bbb181 100644 --- a/src/nlr/tests/Test_PMNR.h +++ b/src/nlr/tests/Test_PMNR.h @@ -1,8 +1,8 @@ /********************* */ -/*! \file Test_LPRelaxation.h +/*! \file Test_PMNR.h ** \verbatim ** Top contributors (to current version): - ** Guy Katz, Andrew Wu + ** Guy Katz, Andrew Wu, Ido Shmuel ** This file is part of the Marabou project. ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS ** in the top-level source directory) and their institutional affiliations. @@ -45,30 +45,20 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( delete mock ); } - void populateNetworkBackwardReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + void populateNetworkWithAbsAndRelu( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* - - 1 R -1 R -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ R / \ R / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 
2 of - https://dl.acm.org/doi/10.1145/3563325 - using ReLU activation instead of LeakyReLU + a + x d f + b + y e g + c */ // Create the layers nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); nlr.addLayer( 4, NLR::Layer::RELU, 2 ); nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); @@ -79,26 +69,30 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Set the weights and biases for the weighted sum layers nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -5 ); - nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 0, 1 ); nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); - nlr.setBias( 5, 1, 2 ); + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); - // Mark the ReLU sources + // Mark the Abs sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + // Mark the ReLU sources nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 1 ); @@ -108,23 +102,25 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); // Very loose bounds for neurons except inputs double large = 1000000; - tableau.getBoundManager().initialize( 12 ); + tableau.getBoundManager().initialize( 14 ); tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); @@ -145,144 +141,90 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 10, large ); tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + 
tableau.setUpperBound( 13, large ); } - void populateNetworkBackwardReLU2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + void populateNetworkWithRoundAndSign( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = ReLU( x3 ) - x8 = ReLU( x4 ) - x9 = ReLU( x5 ) - x10 = ReLU( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = ReLU( x11 ) - x15 = ReLU( x12 ) - x16 = ReLU( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = ReLU( x17 ) - x20 = ReLU( x18 ) - - x21 = x19 - x20 - 1 + a + x d f + b + y e g + c */ // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::RELU, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::RELU, 3 ); + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ROUND, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::RELU, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) + for ( unsigned i = 1; i <= 5; ++i ) nlr.addLayerDependency( i - 1, i ); // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); - nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 0, 1 ); nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); - // Mark the ReLU sources + // Mark the Round sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 1 ); nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); + // Mark the Sign sources nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); // Variable indexing nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - 
nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); // Very loose bounds for neurons except inputs double large = 1000000; - tableau.getBoundManager().initialize( 22 ); + tableau.getBoundManager().initialize( 14 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); tableau.setUpperBound( 3, large ); tableau.setLowerBound( 4, -large ); @@ -305,78 +247,59 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 12, large ); tableau.setLowerBound( 13, -large ); tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); } - void populateNetworkBackwardSigmoid( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + void populateNetworkWithLeakyReluAndSigmoid( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) { /* - - 1 S -1 S -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ S / \ S / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 
2 of - https://dl.acm.org/doi/10.1145/3563325 - using Sigmoid activation instead of LeakyReLU + a + x d f + b + y e g + c */ // Create the layers nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 3 ); nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.getLayer( 2 )->setAlpha( 0.1 ); + // Mark layer dependencies for ( unsigned i = 1; i <= 5; ++i ) nlr.addLayerDependency( i - 1, i ); // Set the weights and biases for the weighted sum layers nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); - nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 0, 1 ); nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); - nlr.setBias( 5, 1, 2 ); + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); - // Mark the Sigmoid sources + // Mark the LeakyReLU sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + // Mark the Sigmoid sources nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 1 ); @@ -386,23 +309,25 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); // Very loose bounds for neurons except inputs double large = 1000000; - tableau.getBoundManager().initialize( 12 ); + tableau.getBoundManager().initialize( 14 ); tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); @@ -423,144 +348,92 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 10, large ); tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + 
tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); } - void populateNetworkBackwardSigmoid2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + void populateNetworkWithSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = Sigmoid( x3 ) - x8 = Sigmoid( x4 ) - x9 = Sigmoid( x5 ) - x10 = Sigmoid( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = Sigmoid( x11 ) - x15 = Sigmoid( x12 ) - x16 = Sigmoid( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = Sigmoid( x17 ) - x20 = Sigmoid( x18 ) - - x21 = x19 - x20 - 1 + a + x d + b f + y e + c */ // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::SIGMOID, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::SIGMOID, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::SIGMOID, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::MAX, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) + for ( unsigned i = 1; i <= 5; ++i ) nlr.addLayerDependency( i - 1, i ); // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); - // Mark the Sigmoid sources + // Mark the Softmax sources nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); + // Mark the 
Max sources nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); + nlr.addActivationSource( 3, 1, 4, 0 ); // Variable indexing nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); // Very loose bounds for neurons except inputs double large = 1000000; - tableau.getBoundManager().initialize( 22 ); + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); tableau.setUpperBound( 3, large ); tableau.setLowerBound( 4, -large ); @@ -579,55 +452,25 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 10, large ); tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); } - void populateNetworkBackwardSign( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + void populateNetworkWithReluAndBilinear( 
NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* - - 1 Sign -1 Sign -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ Sign / \ Sign / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 2 of - https://dl.acm.org/doi/10.1145/3563325 - using Sign activation instead of LeakyReLU + a + x d + b f + y e + c */ // Create the layers nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); // Mark layer dependencies for ( unsigned i = 1; i <= 5; ++i ) @@ -635,28 +478,30 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Set the weights and biases for the weighted sum layers nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - nlr.setBias( 5, 1, 2 ); + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); - // Mark the Sign sources + // Mark the ReLU sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + // Mark the Bilinear sources nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 1, 4, 0 ); // Variable indexing nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); @@ -664,18 +509,18 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); // Very loose bounds for neurons except inputs double large = 1000000; @@ -703,142 +548,84 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 
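A quick sanity trace for the populateNetworkMinimalReLU fixture added later in this hunk, assuming the evaluate( input, output ) entry point that other tests in this suite use; the intermediate values follow directly from the weights the fixture sets. On the input ( 1, 1 ): x2 = x0 + x1 = 2 and x3 = x0 - x1 = 0; the first ReLU gives x4 = 2 and x5 = 0; then x6 = x4 + 2*x5 = 2 and x7 = x5 + 1.5 = 1.5; the second ReLU gives x8 = 2 and x9 = 1.5; finally x10 = -x8 + x9 + 1 = 0.5.

    // Sketch only: exercising the minimal-ReLU fixture end to end.
    NLR::NetworkLevelReasoner nlr;
    MockTableau tableau;
    nlr.setTableau( &tableau );
    populateNetworkMinimalReLU( nlr, tableau );

    double input[2] = { 1, 1 }; // x0, x1
    double output[1];           // x10
    nlr.evaluate( input, output );
    TS_ASSERT( FloatUtils::areEqual( output[0], 0.5 ) );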
tableau.setUpperBound( 11, large ); } - void populateNetworkBackwardSign2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + void populateNetworkMinimalReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = Sign( x3 ) - x8 = Sign( x4 ) - x9 = SIgn( x5 ) - x10 = Sign( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = Sign( x11 ) - x15 = Sign( x12 ) - x16 = Sign( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = Sign( x17 ) - x20 = Sign( x18 ) - - x21 = x19 - x20 - 1 + 1 R 1 R -1 1 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / / + 1 \ / 0 \ / / + \/ \/ / + /\ /\ / + 1 / \ 2 / \ -1 / + / \ R / \ R / + x1 --- x3 ---> x5 --- x7 ---> x9 + -1 1 1.5 + + The example described in Fig. 2 of + https://proceedings.neurips.cc/paper_files/paper/2019/file/0a9fdbb17feb6ccb7ec405cfb85222c4-Paper.pdf */ // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::SIGN, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::SIGN, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) + for ( unsigned i = 1; i <= 5; ++i ) nlr.addLayerDependency( i - 1, i ); // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 0, 1 ); nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, 0 ); + nlr.setWeight( 2, 1, 3, 0, 2 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); + nlr.setBias( 3, 1, 1.5 ); + nlr.setBias( 5, 0, 1 ); - // Mark the Sign sources + // Mark the ReLU sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 
6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); // Variable indexing nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); // Very loose bounds for neurons except inputs double large = 1000000; - tableau.getBoundManager().initialize( 22 ); + tableau.getBoundManager().initialize( 11 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); tableau.setUpperBound( 3, large ); tableau.setLowerBound( 4, -large ); @@ -855,10201 +642,1404 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 9, large ); tableau.setLowerBound( 10, -large ); tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); } - void populateNetworkBackwardRound( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + void test_backward_abs_and_relu() { - /* + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); - 1 Rnd -1 Rnd -1 2 - x0 --- 
x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ Rnd / \ Rnd / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 2 of - https://dl.acm.org/doi/10.1145/3563325 - using Round activation instead of LeakyReLU - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ROUND, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the Round sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardRound2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = Round( x3 ) - x8 = Round( x4 ) - x9 = Round( x5 ) - x10 = Round( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = Round( x11 ) - x15 = Round( x12 ) - x16 = Round( x13 ) - - x17 = -x14 + x15 - x16 - x18 
= x14 + x15 + x16 - - x19 = Round( x17 ) - x20 = Round( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::ROUND, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::ROUND, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::ROUND, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the Round sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large 
); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardAbs( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 A -1 A -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ A / \ A / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 2 of - https://dl.acm.org/doi/10.1145/3563325 - using Absolute value activation instead of LeakyReLU - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); 
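Every fixture in this suite, old and new, closes with the same boilerplate seen above and below: bind each NLR::NeuronIndex( layer, neuron ) to a tableau variable, size the bound manager to the total variable count, and relax every non-input variable to [-large, large]. A minimal sketch of that pattern as a hypothetical helper, assuming the getNumberOfLayers() / getLayer() / getSize() accessors this suite uses elsewhere and sequential variable numbering (some fixtures deliberately interleave the order instead, so they spell the assignments out by hand):

    unsigned bindNeuronsAndRelaxBounds( NLR::NetworkLevelReasoner &nlr,
                                        MockTableau &tableau,
                                        unsigned numInputs )
    {
        // Assign tableau variables to neurons, layer by layer.
        unsigned variable = 0;
        for ( unsigned i = 0; i < nlr.getNumberOfLayers(); ++i )
            for ( unsigned j = 0; j < nlr.getLayer( i )->getSize(); ++j )
                nlr.setNeuronVariable( NLR::NeuronIndex( i, j ), variable++ );

        // Very loose bounds for every neuron except the inputs.
        double large = 1000000;
        tableau.getBoundManager().initialize( variable );
        for ( unsigned v = numInputs; v < variable; ++v )
        {
            tableau.setLowerBound( v, -large );
            tableau.setUpperBound( v, large );
        }
        return variable;
    }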
- - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardAbs2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = Abs( x3 ) - x8 = Abs( x4 ) - x9 = Abs( x5 ) - x10 = Abs( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = Abs( x11 ) - x15 = Abs( x12 ) - x16 = Abs( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = Abs( x17 ) - x20 = Abs( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 
5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardLeakyReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 LR -1 LR -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ LR / \ LR / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 
2 of - https://dl.acm.org/doi/10.1145/3563325 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - nlr.getLayer( 2 )->setAlpha( 0.1 ); - nlr.getLayer( 4 )->setAlpha( 0.1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the LeakyReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardLeakyRelu2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = LeakyReLU( x3 ) - x8 = LeakyReLU( x4 ) - x9 = LeakyReLU( x5 ) - x10 = LeakyReLU( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = LeakyReLU( x11 ) - x15 = LeakyReLU( x12 ) - x16 = LeakyReLU( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = LeakyReLU( x17 ) - x20 = LeakyReLU( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, 
NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::LEAKY_RELU, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - nlr.getLayer( 2 )->setAlpha( 0.1 ); - nlr.getLayer( 4 )->setAlpha( 0.1 ); - nlr.getLayer( 6 )->setAlpha( 0.1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the LeakyReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large 
); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr, - MockTableau &tableau ) - { - /* - a a' - x e - b b' f h - y d - c c' - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::MAX, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Softmax/Max sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 0, 2, 1 ); - nlr.addActivationSource( 1, 0, 2, 2 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 1, 2, 2 ); - nlr.addActivationSource( 1, 2, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( 
NLR::NeuronIndex( 4, 0 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - - void populateNetworkBackwardSoftmaxAndMax2( NLR::NetworkLevelReasoner &nlr, - MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 - x1 x12 x15 x17 - x5 x9 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7, x8, x9, x10 = Softmax( x2, x3, x4, x5 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14, x15, x16 = Softmax( x11, x12, x13 ) - - x17 = Max( x14, x15, x16 ) - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::SOFTMAX, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::SOFTMAX, 3 ); - nlr.addLayer( 5, NLR::Layer::MAX, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - - // Mark the Softmax/Max sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 0 ); - nlr.addActivationSource( 1, 3, 2, 0 ); - nlr.addActivationSource( 1, 0, 2, 1 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 3, 2, 1 ); - nlr.addActivationSource( 1, 0, 2, 2 ); - nlr.addActivationSource( 1, 1, 2, 2 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 2 ); - nlr.addActivationSource( 1, 0, 2, 3 ); - nlr.addActivationSource( 1, 1, 2, 3 ); - nlr.addActivationSource( 1, 2, 2, 3 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - nlr.addActivationSource( 3, 2, 4, 0 ); - nlr.addActivationSource( 3, 0, 4, 1 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - 
nlr.addActivationSource( 3, 2, 4, 1 ); - nlr.addActivationSource( 3, 0, 4, 2 ); - nlr.addActivationSource( 3, 1, 4, 2 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 4, 0, 5, 0 ); - nlr.addActivationSource( 4, 1, 5, 0 ); - nlr.addActivationSource( 4, 2, 5, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 18 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - } - - void populateNetworkBackwardReluAndBilinear( NLR::NetworkLevelReasoner &nlr, - MockTableau &tableau ) - { - /* - a a' - x e - b b' f h - y d - c c' - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - 
nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the ReLU/Bilinear sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardReluAndBilinear2( NLR::NetworkLevelReasoner &nlr, - MockTableau &tableau ) - { - /* - x3 x7 - x0 - x4 x8 x11 x13 - x1 x15 - x5 x9 x12 x14 - x2 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = ReLU( x3 ) - x8 = ReLU( x4 ) - x9 = ReLU( x5 ) - x10 = ReLU( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - - x13 = ReLU( x11 ) - x14 = ReLU( x12 ) - - x15 = x13 * x14 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::RELU, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::BILINEAR, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setBias( 3, 0, 2 ); - - // Mark the ReLU/Bilinear sources - nlr.addActivationSource( 1, 0, 2, 0 ); - 
nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - nlr.addActivationSource( 4, 0, 5, 0 ); - nlr.addActivationSource( 4, 1, 5, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 13 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 14 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 15 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 16 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - } - - void test_invprop_relu() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardReLU( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), 
Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ), - Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 8, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ), - - Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 8, 0, Tightening::LB ), - Tightening( 9, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_invprop_relu2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardReLU2( nlr, 
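-            // Same pipeline on a deeper, three-input ReLU network. As in the
-            // test above, the backward pass can leave ReLU-stage variables
-            // with negative lower bounds ( x14..x16 and x19 below ), which
-            // invprop then lifts to 0.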
tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), - Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ), - - Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ), - Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), - Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ), - - Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ), - Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ), - - Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ), - Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ), - - Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 14, 0, Tightening::LB ), - Tightening( 15, 0, Tightening::LB ), - Tightening( 16, 0, Tightening::LB ), - - Tightening( 19, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large 
); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), - Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), - Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ), - - Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ), - Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ), - Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ), - - Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ), - Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ), - - Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ), - Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ), - - Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 7, 0, Tightening::LB ), - - Tightening( 14, 0, Tightening::LB ), - Tightening( 15, 0, Tightening::LB ), - Tightening( 16, 0, Tightening::LB ), - - Tightening( 20, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_invprop_sigmoid() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSigmoid( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - 
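-        // Inputs are boxed to [ 0, 1 ]^2. In the expected bounds below,
-        // x4's range [ 0.2689, 0.7311 ] matches the sigmoid image of
-        // x2 in [ -1, 1 ], since sigmoid( -1 ) ~ 0.2689 and
-        // sigmoid( 1 ) ~ 0.7311.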
tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, 0.2689, Tightening::LB ), Tightening( 4, 0.7311, Tightening::UB ), - Tightening( 5, 0.5, Tightening::LB ), Tightening( 5, 0.8808, Tightening::UB ), - - Tightening( 6, -0.1261, Tightening::LB ), Tightening( 6, 0.5069, Tightening::UB ), - Tightening( 7, -0.2379, Tightening::LB ), Tightening( 7, 0.8571, Tightening::UB ), - - Tightening( 8, 0.4685, Tightening::LB ), Tightening( 8, 0.6241, Tightening::UB ), - Tightening( 9, 0.4408, Tightening::LB ), Tightening( 9, 0.7021, Tightening::UB ), - - Tightening( 10, -1.1819, Tightening::LB ), Tightening( 10, -1.0535, Tightening::UB ), - Tightening( 11, 3.4986, Tightening::LB ), Tightening( 11, 3.8797, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, 0.0066, Tightening::LB ), Tightening( 4, 0.8807, Tightening::UB ), - Tightening( 5, 0.0179, Tightening::LB ), Tightening( 5, 0.9526, Tightening::UB ), - - Tightening( 6, -0.8362, Tightening::LB ), Tightening( 6, 0.9193, Tightening::UB ), - Tightening( 7, -0.8860, Tightening::LB ), Tightening( 7, 1.6904, Tightening::UB ), - - Tightening( 8, 0.3023, Tightening::LB ), Tightening( 8, 0.7148, Tightening::UB ), - Tightening( 9, 0.2919, Tightening::LB ), Tightening( 9, 0.8443, Tightening::UB ), - - Tightening( 10, -1.2694, Tightening::LB ), Tightening( 10, -0.8841, Tightening::UB ), - Tightening( 11, 3.2396, Tightening::LB ), Tightening( 11, 4.05, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( 
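-        // With the widened inputs, x4's listed range [ 0.0066, 0.8807 ]
-        // again tracks the sigmoid image of x2 in [ -5, 2 ]:
-        // sigmoid( -5 ) ~ 0.0067 and sigmoid( 2 ) ~ 0.8808, up to the
-        // four-decimal rounding these tests appear to use.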
nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_invprop_sigmoid2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSigmoid2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), - Tightening( 8, 0.0474, Tightening::LB ), Tightening( 8, 0.9526, Tightening::UB ), - Tightening( 9, 0.0474, Tightening::LB ), Tightening( 9, 0.9526, Tightening::UB ), - Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9975, Tightening::UB ), - - Tightening( 11, 1.3787, Tightening::LB ), Tightening( 11, 4.6213, Tightening::UB ), - Tightening( 12, -0.5636, Tightening::LB ), Tightening( 12, 2.5636, Tightening::UB ), - Tightening( 13, -2.3771, Tightening::LB ), Tightening( 13, 0.3771, Tightening::UB ), - - Tightening( 14, 0.7988, Tightening::LB ), Tightening( 14, 0.9903, Tightening::UB ), - Tightening( 15, 0.3627, Tightening::LB ), Tightening( 15, 0.9285, Tightening::UB ), - Tightening( 16, 0.0849, Tightening::LB ), Tightening( 16, 0.5932, Tightening::UB ), - - Tightening( 17, -1.2113, Tightening::LB ), Tightening( 17, 0.0354, Tightening::UB ), - Tightening( 18, 1.4027, Tightening::LB ), Tightening( 18, 2.4177, Tightening::UB ), - - Tightening( 19, 0.2295, Tightening::LB ), Tightening( 19, 0.5088, Tightening::UB ), - Tightening( 20, 0.8026, Tightening::LB ), Tightening( 20, 0.9182, Tightening::UB ), - - Tightening( 21, -1.6539, Tightening::LB ), Tightening( 21, -1.3393, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 
1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.9933, Tightening::UB ), - Tightening( 8, 0.0067, Tightening::LB ), Tightening( 8, 0.9933, Tightening::UB ), - Tightening( 9, 0.0025, Tightening::LB ), Tightening( 9, 0.9933, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9991, Tightening::UB ), - - Tightening( 11, 1.2517, Tightening::LB ), Tightening( 11, 4.9701, Tightening::UB ), - Tightening( 12, -0.9599, Tightening::LB ), Tightening( 12, 2.8466, Tightening::UB ), - Tightening( 13, -2.8147, Tightening::LB ), Tightening( 13, 0.9188, Tightening::UB ), - - Tightening( 14, 0.7776, Tightening::LB ), Tightening( 14, 0.9931, Tightening::UB ), - Tightening( 15, 0.2769, Tightening::LB ), Tightening( 15, 0.9451, Tightening::UB ), - Tightening( 16, 0.0565, Tightening::LB ), Tightening( 16, 0.7148, Tightening::UB ), - - Tightening( 17, -1.4307, Tightening::LB ), Tightening( 17, 0.1083, Tightening::UB ), - Tightening( 18, 1.2592, Tightening::LB ), Tightening( 18, 2.5519, Tightening::UB ), - - Tightening( 19, 0.1929, Tightening::LB ), Tightening( 19, 0.5270, Tightening::UB ), - Tightening( 20, 0.7789, Tightening::LB ), Tightening( 20, 0.9277, Tightening::UB ), - - Tightening( 21, -1.6967, Tightening::LB ), Tightening( 21, -1.3115, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( 
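-        // As with the first sigmoid test, no further tightenings are
-        // expected from the LP relaxation here ( expectedBounds4 below is
-        // empty ); the sigmoid bounds are already as tight as this pass
-        // can certify.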
nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_invprop_abs() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardAbs( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), - - Tightening( 10, -4, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, 2, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 4, Tightening::UB ), - - Tightening( 6, -5, 
Tightening::LB ), Tightening( 6, 4, Tightening::UB ), - Tightening( 7, -4, Tightening::LB ), Tightening( 7, 10, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 10, Tightening::UB ), - - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, 2, Tightening::LB ), Tightening( 11, 27, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_invprop_abs2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardAbs2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -4, Tightening::LB ), Tightening( 11, 9, Tightening::UB ), - Tightening( 12, -2, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), - Tightening( 13, -5, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), - - Tightening( 14, 0, Tightening::LB ), Tightening( 14, 9, Tightening::UB ), - Tightening( 15, 0, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), - Tightening( 16, 0, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), - - Tightening( 17, -15, Tightening::LB ), Tightening( 17, 12, Tightening::UB ), - Tightening( 18, 0, Tightening::LB ), Tightening( 18, 27, Tightening::UB ), - - Tightening( 19, 0, Tightening::LB ), Tightening( 19, 15, Tightening::UB ), - Tightening( 20, 0, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), - - Tightening( 21, -28, Tightening::LB ), Tightening( 21, 14, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( 
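-        // Absolute value outputs are already nonnegative with upper bound
-        // max( |lb|, |ub| ) of their input ( above, x14 in [ 0, 9 ] pairs
-        // with x11 in [ -4, 9 ] ), so the LP pass again finds nothing
-        // further to tighten.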
nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 15, Tightening::UB ), - - Tightening( 11, -13, Tightening::LB ), Tightening( 11, 18, Tightening::UB ), - Tightening( 12, -5, Tightening::LB ), Tightening( 12, 25, Tightening::UB ), - Tightening( 13, -7, Tightening::LB ), Tightening( 13, 15, Tightening::UB ), - - Tightening( 14, 0, Tightening::LB ), Tightening( 14, 18, Tightening::UB ), - Tightening( 15, 0, Tightening::LB ), Tightening( 15, 25, Tightening::UB ), - Tightening( 16, 0, Tightening::LB ), Tightening( 16, 15, Tightening::UB ), - - Tightening( 17, -33, Tightening::LB ), Tightening( 17, 25, Tightening::UB ), - Tightening( 18, 0, Tightening::LB ), Tightening( 18, 58, Tightening::UB ), - - Tightening( 19, 0, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), - Tightening( 20, 0, Tightening::LB ), Tightening( 20, 58, Tightening::UB ), - - Tightening( 21, -59, Tightening::LB ), Tightening( 21, 32, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( 
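-            // e.g. x10 in [ 0, 15 ] pairs with x6 in [ -15, 7 ]
-            // ( max( 15, 7 ) = 15 ), and the widened ranges then propagate
-            // through the remaining weighted sums down to x21 in [ -59, 32 ].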
bounds, expectedBounds3 ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_invprop_round() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardRound( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), - Tightening( 7, -4, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 6.5, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 11, -4, Tightening::LB ), - Tightening( 11, 6, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB 
), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, -3, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), - Tightening( 7, -10.5, Tightening::LB ), Tightening( 7, 5.5, Tightening::UB ), - - Tightening( 8, -3, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, -10, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), - - Tightening( 10, -3, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - Tightening( 11, -15.5, Tightening::LB ), Tightening( 11, 11.5, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 7, -10, Tightening::LB ), - Tightening( 7, 5, Tightening::UB ), - - Tightening( 11, -14, Tightening::LB ), - Tightening( 11, 11, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_invprop_round2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardRound2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -11, Tightening::LB ), Tightening( 11, 15, Tightening::UB ), - Tightening( 12, -10, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), - Tightening( 13, -7, Tightening::LB ), Tightening( 13, 3, Tightening::UB ), - - Tightening( 14, -11, Tightening::LB ), Tightening( 14, 15, Tightening::UB ), - Tightening( 15, -10, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), - Tightening( 16, -7, Tightening::LB ), Tightening( 16, 3, Tightening::UB ), - - Tightening( 17, -27.5, Tightening::LB ), Tightening( 17, 27.5, Tightening::UB ), - Tightening( 18, -16.5, Tightening::LB ), Tightening( 18, 16.5, Tightening::UB ), - - Tightening( 19, -28, Tightening::LB ), Tightening( 19, 28, Tightening::UB ), - Tightening( 20, -17, Tightening::LB ), Tightening( 20, 17, Tightening::UB ), - - Tightening( 21, -38, 
Tightening::LB ), Tightening( 21, 36, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -13, Tightening::LB ), Tightening( 11, 30, Tightening::UB ), - Tightening( 12, -23, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), - Tightening( 13, -9, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), - - Tightening( 14, -13, Tightening::LB ), Tightening( 14, 30, Tightening::UB ), - Tightening( 15, -23, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), - Tightening( 16, -9, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), - - Tightening( 17, 
-57.5, Tightening::LB ), Tightening( 17, 32.5, Tightening::UB ), - Tightening( 18, -23.5, Tightening::LB ), Tightening( 18, 26.5, Tightening::UB ), - - Tightening( 19, -58, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), - Tightening( 20, -24, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), - - Tightening( 21, -74, Tightening::LB ), Tightening( 21, 44, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_invprop_sign() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSign( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, 
-large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_invprop_sign2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSign2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), - Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), - Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), - - Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), - Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), - Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), - - Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), - Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, 
Tightening::UB ), - - Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), - Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), - - Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), - Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), - Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), - - Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, 
Tightening::UB ), - Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), - Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), - - Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), - Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), - - Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), - Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), - - Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_invprop_leaky_relu() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardLeakyReLU( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke Invprop - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 4, -0.1, Tightening::LB ), - - Tightening( 8, -0.045, Tightening::LB ), - Tightening( 9, -0.3, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, 
large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-
-            Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ),
-            Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ),
-
-            Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ),
-            Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ),
-
-            Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ),
-            Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke Invprop
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 4, -0.5, Tightening::LB ),
-            Tightening( 5, -0.4, Tightening::LB ),
-
-            Tightening( 8, -0.4571, Tightening::LB ),
-            Tightening( 9, -1.1057, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_invprop_leaky_relu2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardLeakyRelu2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-            Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
-            Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-
-            Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ),
-            Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ),
-            Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ),
-
-            Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ),
-            Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ),
-            Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ),
-
-            Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ),
-            Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ),
-
-            Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ),
-            Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ),
-
-            Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke Invprop
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 7, -0.2, Tightening::LB ),
-            Tightening( 8, -0.3, Tightening::LB ),
-            Tightening( 9, -0.3, Tightening::LB ),
-            Tightening( 10, -0.6, Tightening::LB ),
-
-            Tightening( 14, -0.9, Tightening::LB ),
-            Tightening( 15, -0.89, Tightening::LB ),
-            Tightening( 16, -0.77, Tightening::LB ),
-
-            Tightening( 19, -2.3133, Tightening::LB ),
-            Tightening( 20, -1.2, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
-            Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-
-            Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ),
-            Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ),
-            Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ),
-
-            Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ),
-            Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ),
-            Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ),
-
-            Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ),
-            Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ),
-
-            Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ),
-            Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ),
-
-            Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke Invprop
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 7, -0.2, Tightening::LB ),
-            Tightening( 8, -0.5, Tightening::LB ),
-            Tightening( 9, -0.6, Tightening::LB ),
-            Tightening( 10, -1.5, Tightening::LB ),
-
-            Tightening( 14, -1.1, Tightening::LB ),
-            Tightening( 15, -2.1771, Tightening::LB ),
-            Tightening( 16, -1.15, Tightening::LB ),
-
-            Tightening( 19, -5.6259, Tightening::LB ),
-            Tightening( 20, -1.9, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
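The Invprop lower bounds asserted above for the LeakyReLU neurons are the concrete-interval images of their sources. A minimal sketch of that rule, assuming a slope of 0.1 (consistent with the expectations, e.g. 0.1 * -2 = -0.2 for variable 7); the helper names are illustrative, not part of this suite:

    // Illustrative only: concrete-interval image of LeakyReLU, slope assumed 0.1.
    double leakyReluLower( double sourceLb, double alpha = 0.1 )
    {
        return sourceLb >= 0 ? sourceLb : alpha * sourceLb;
    }

    double leakyReluUpper( double sourceUb, double alpha = 0.1 )
    {
        return sourceUb >= 0 ? sourceUb : alpha * sourceUb;
    }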
-
-    void test_invprop_softmax_and_max()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSoftmaxAndMax( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
-
-            Tightening( 3, 0.2119, Tightening::LB ), Tightening( 3, 0.8756, Tightening::UB ),
-            Tightening( 5, 0.0049, Tightening::LB ), Tightening( 5, 0.6652, Tightening::UB ),
-            Tightening( 7, 0.0634, Tightening::LB ), Tightening( 7, 0.4955, Tightening::UB ),
-
-            Tightening( 8, -0.1870, Tightening::LB ), Tightening( 8, 1.4488, Tightening::UB ),
-            Tightening( 9, 0.6814, Tightening::LB ), Tightening( 9, 2.2432, Tightening::UB ),
-
-            Tightening( 10, 0.6814, Tightening::LB ), Tightening( 10, 2.2432, Tightening::UB ),
-
-            Tightening( 11, -2.2432, Tightening::LB ), Tightening( 11, -0.6814, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke Invprop
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-
-            Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ),
-            Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ),
-
-            Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ),
-            Tightening( 9, 0.2194, Tightening::LB ), Tightening( 9, 2.9934, Tightening::UB ),
-
-            Tightening( 10, 0.2194, Tightening::LB ), Tightening( 10, 2.9934, Tightening::UB ),
-
-            Tightening( 11, -2.9934, Tightening::LB ), Tightening( 11, -0.2194, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke Invprop
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
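The softmax expectations are easy to spot-check at a corner: with x2 in [1, 2], x4 in [-3, 2] and x6 in [0, 1], the first softmax output is smallest at ( 1, 2, 1 ), giving 1 / ( 1 + e^1 + e^0 ) ~= 0.2119, exactly the lower bound asserted for variable 3. A small illustrative evaluator (not one of the test's helpers):

    #include <cmath>

    // Illustrative only: softmax_0( 1, 2, 1 ) = 1 / ( 1 + e^1 + e^0 ) ~= 0.2119.
    double softmaxFirstOutput( double x0, double x1, double x2 )
    {
        double m = std::fmax( x0, std::fmax( x1, x2 ) ); // stabilize the exponentials
        double e0 = std::exp( x0 - m );
        double e1 = std::exp( x1 - m );
        double e2 = std::exp( x2 - m );
        return e0 / ( e0 + e1 + e2 );
    }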
-    }
-
-    void test_invprop_softmax_and_max2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSoftmaxAndMax2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, 0.0003, Tightening::LB ), Tightening( 7, 0.9864, Tightening::UB ),
-            Tightening( 8, 0.0001, Tightening::LB ), Tightening( 8, 0.9907, Tightening::UB ),
-            Tightening( 9, 0.0001, Tightening::LB ), Tightening( 9, 0.9907, Tightening::UB ),
-            Tightening( 10, 0.0001, Tightening::LB ), Tightening( 10, 0.9994, Tightening::UB ),
-
-            Tightening( 11, 1.0013, Tightening::LB ), Tightening( 11, 4.9634, Tightening::UB ),
-            Tightening( 12, -0.9861, Tightening::LB ), Tightening( 12, 2.9152, Tightening::UB ),
-            Tightening( 13, -2.9868, Tightening::LB ), Tightening( 13, 0.9678, Tightening::UB ),
-
-            Tightening( 14, 0.1143, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ),
-            Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8694, Tightening::UB ),
-            Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4596, Tightening::UB ),
-
-            Tightening( 17, 0.1143, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke Invprop
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, 0.0001, Tightening::LB ), Tightening( 7, 0.9999, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 0.9991, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 0.9990, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9999, Tightening::UB ),
-
-            Tightening( 11, 1.0003, Tightening::LB ), Tightening( 11, 4.9326, Tightening::UB ),
-            Tightening( 12, -0.9999, Tightening::LB ), Tightening( 12, 2.9979, Tightening::UB ),
-            Tightening( 13, -2.9990, Tightening::LB ), Tightening( 13, 0.9980, Tightening::UB ),
-
-            Tightening( 14, 0.1067, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ),
-            Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8786, Tightening::UB ),
-            Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4677, Tightening::UB ),
-
-            Tightening( 17, 0.1067, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke Invprop
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_invprop_relu_and_bilinear()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardReluAndBilinear( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
-
-            Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ),
-
-            Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ),
-
-            Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke Invprop
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-
-            Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ),
-            Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ),
-
-            Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ),
-
-            Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke Invprop
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 7, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
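The bilinear expectations above follow the usual interval product: with x8 in [0, 4] and x9 in [-1, 2.2], the four corner products give x10 in [-4, 8.8]. A minimal sketch of that rule (illustrative helper, not Marabou API):

    #include <algorithm>

    // Illustrative only: interval product over the four corners.
    // bilinearBounds( 0, 4, -1, 2.2, lb, ub ) yields lb = -4, ub = 8.8.
    void bilinearBounds( double l1, double u1, double l2, double u2, double &lb, double &ub )
    {
        double corners[4] = { l1 * l2, l1 * u2, u1 * l2, u1 * u2 };
        lb = *std::min_element( corners, corners + 4 );
        ub = *std::max_element( corners, corners + 4 );
    }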
-
-    void test_invprop_relu_and_bilinear2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardReluAndBilinear2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-
-            Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ),
-            Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ),
-
-            Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ),
-            Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ),
-
-            Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke Invprop
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 13, 0, Tightening::LB ),
-            Tightening( 14, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-
-            Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ),
-            Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ),
-
-            Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ),
-            Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ),
-
-            Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke Invprop
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 7, 0, Tightening::LB ),
-
-            Tightening( 13, 0, Tightening::LB ),
-            Tightening( 14, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
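Each round trip above feeds the reported tightenings back into the mock tableau before re-running propagation. A plausible shape for the updateTableau helper, assumed from its call sites here (the actual definition appears elsewhere in this file):

    // Assumed semantics, inferred from the call sites above -- not the
    // suite's actual definition: apply each tightening as a new bound.
    void updateTableau( MockTableau &tableau, List<Tightening> &tightenings )
    {
        for ( const auto &t : tightenings )
        {
            if ( t._type == Tightening::LB )
                tableau.setLowerBound( t._variable, t._value );
            else
                tableau.setUpperBound( t._variable, t._value );
        }
    }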
-
-    void test_pmnr_random_relu()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-random" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardReLU( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
-
-            Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-
-            Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ),
-            Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 8, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-
-            Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ),
-            Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ),
-
-            Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ),
-
-            Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ),
-            Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 8, 0, Tightening::LB ),
-            Tightening( 9, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_random_relu2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-random" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardReLU2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-
-            Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ),
-            Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ),
-            Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ),
-
-            Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ),
-            Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ),
-            Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ),
-
-            Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ),
-            Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ),
-
-            Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ),
-            Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ),
-
-            Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 14, 0, Tightening::LB ),
-            Tightening( 15, 0, Tightening::LB ),
-            Tightening( 16, 0, Tightening::LB ),
-
-            Tightening( 19, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-
-            Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ),
-            Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ),
-            Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ),
-
-            Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ),
-            Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ),
-            Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ),
-
-            Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ),
-            Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ),
-
-            Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ),
-            Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ),
-
-            Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 7, 0, Tightening::LB ),
-
-            Tightening( 14, 0, Tightening::LB ),
-            Tightening( 15, 0, Tightening::LB ),
-            Tightening( 16, 0, Tightening::LB ),
-
-            Tightening( 20, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
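The boundsEqual predicate is likewise used only through its call sites here; a minimal sketch of the semantics these assertions appear to rely on, namely equality of the two tightening lists up to a small tolerance (assumed, not the suite's actual definition):

    // Assumed semantics, not the suite's actual definition: the two lists
    // must contain the same tightenings up to a small epsilon.
    bool boundsEqual( const List<Tightening> &bounds, const List<Tightening> &expected )
    {
        if ( bounds.size() != expected.size() )
            return false;
        for ( const auto &e : expected )
        {
            bool found = false;
            for ( const auto &b : bounds )
                if ( b._variable == e._variable && b._type == e._type &&
                     FloatUtils::areEqual( b._value, e._value, 0.0001 ) )
                    found = true;
            if ( !found )
                return false;
        }
        return true;
    }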
-
-    void test_pmnr_random_sigmoid()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-random" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSigmoid( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, 0.2689, Tightening::LB ), Tightening( 4, 0.7311, Tightening::UB ),
-            Tightening( 5, 0.5, Tightening::LB ), Tightening( 5, 0.8808, Tightening::UB ),
-
-            Tightening( 6, -0.1261, Tightening::LB ), Tightening( 6, 0.5069, Tightening::UB ),
-            Tightening( 7, -0.2379, Tightening::LB ), Tightening( 7, 0.8571, Tightening::UB ),
-
-            Tightening( 8, 0.4685, Tightening::LB ), Tightening( 8, 0.6241, Tightening::UB ),
-            Tightening( 9, 0.4408, Tightening::LB ), Tightening( 9, 0.7021, Tightening::UB ),
-
-            Tightening( 10, -1.1819, Tightening::LB ), Tightening( 10, -1.0535, Tightening::UB ),
-            Tightening( 11, 3.4986, Tightening::LB ), Tightening( 11, 3.8797, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, 0.0066, Tightening::LB ), Tightening( 4, 0.8807, Tightening::UB ),
-            Tightening( 5, 0.0179, Tightening::LB ), Tightening( 5, 0.9526, Tightening::UB ),
-
-            Tightening( 6, -0.8362, Tightening::LB ), Tightening( 6, 0.9193, Tightening::UB ),
-            Tightening( 7, -0.8860, Tightening::LB ), Tightening( 7, 1.6904, Tightening::UB ),
-
-            Tightening( 8, 0.3023, Tightening::LB ), Tightening( 8, 0.7148, Tightening::UB ),
-            Tightening( 9, 0.2919, Tightening::LB ), Tightening( 9, 0.8443, Tightening::UB ),
-
-            Tightening( 10, -1.2694, Tightening::LB ), Tightening( 10, -0.8841, Tightening::UB ),
-            Tightening( 11, 3.2396, Tightening::LB ), Tightening( 11, 4.05, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_random_sigmoid2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-random" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSigmoid2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ),
-            Tightening( 8, 0.0474, Tightening::LB ), Tightening( 8, 0.9526, Tightening::UB ),
-            Tightening( 9, 0.0474, Tightening::LB ), Tightening( 9, 0.9526, Tightening::UB ),
-            Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9975, Tightening::UB ),
-
-            Tightening( 11, 1.3787, Tightening::LB ), Tightening( 11, 4.6213, Tightening::UB ),
-            Tightening( 12, -0.5636, Tightening::LB ), Tightening( 12, 2.5636, Tightening::UB ),
-            Tightening( 13, -2.3771, Tightening::LB ), Tightening( 13, 0.3771, Tightening::UB ),
-
-            Tightening( 14, 0.7988, Tightening::LB ), Tightening( 14, 0.9903, Tightening::UB ),
-            Tightening( 15, 0.3627, Tightening::LB ), Tightening( 15, 0.9285, Tightening::UB ),
-            Tightening( 16, 0.0849, Tightening::LB ), Tightening( 16, 0.5932, Tightening::UB ),
-
-            Tightening( 17, -1.2113, Tightening::LB ), Tightening( 17, 0.0354, Tightening::UB ),
-            Tightening( 18, 1.4027, Tightening::LB ), Tightening( 18, 2.4177, Tightening::UB ),
-
-            Tightening( 19, 0.2295, Tightening::LB ), Tightening( 19, 0.5088, Tightening::UB ),
-            Tightening( 20, 0.8026, Tightening::LB ), Tightening( 20, 0.9182, Tightening::UB ),
-
-            Tightening( 21, -1.6539, Tightening::LB ), Tightening( 21, -1.3393, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.9933, Tightening::UB ),
-            Tightening( 8, 0.0067, Tightening::LB ), Tightening( 8, 0.9933, Tightening::UB ),
-            Tightening( 9, 0.0025, Tightening::LB ), Tightening( 9, 0.9933, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9991, Tightening::UB ),
-
-            Tightening( 11, 1.2517, Tightening::LB ), Tightening( 11, 4.9701, Tightening::UB ),
-            Tightening( 12, -0.9599, Tightening::LB ), Tightening( 12, 2.8466, Tightening::UB ),
-            Tightening( 13, -2.8147, Tightening::LB ), Tightening( 13, 0.9188, Tightening::UB ),
-
-            Tightening( 14, 0.7776, Tightening::LB ), Tightening( 14, 0.9931, Tightening::UB ),
-            Tightening( 15, 0.2769, Tightening::LB ), Tightening( 15, 0.9451, Tightening::UB ),
-            Tightening( 16, 0.0565, Tightening::LB ), Tightening( 16, 0.7148, Tightening::UB ),
-
-            Tightening( 17, -1.4307, Tightening::LB ), Tightening( 17, 0.1083, Tightening::UB ),
-            Tightening( 18, 1.2592, Tightening::LB ), Tightening( 18, 2.5519, Tightening::UB ),
-
-            Tightening( 19, 0.1929, Tightening::LB ), Tightening( 19, 0.5270, Tightening::UB ),
-            Tightening( 20, 0.7789, Tightening::LB ), Tightening( 20, 0.9277, Tightening::UB ),
-
-            Tightening( 21, -1.6967, Tightening::LB ), Tightening( 21, -1.3115, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
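Sigmoid is monotonic, so the activation bounds above are simply the sigmoid of the source bounds; for instance sigma( -2 ) ~= 0.1192 and sigma( 2 ) ~= 0.8808 reproduce the expectations for variable 7. A one-line check (illustrative):

    #include <cmath>

    // Illustrative only: sigmoid( -2 ) ~= 0.1192, sigmoid( 2 ) ~= 0.8808.
    double sigmoid( double x )
    {
        return 1.0 / ( 1.0 + std::exp( -x ) );
    }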
-
-    void test_pmnr_random_abs()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-random" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardAbs( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
-
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ),
-
-            Tightening( 10, -4, Tightening::LB ), Tightening( 10, 0, Tightening::UB ),
-            Tightening( 11, 2, Tightening::LB ), Tightening( 11, 8, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, 0, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 4, Tightening::UB ),
-
-            Tightening( 6, -5, Tightening::LB ), Tightening( 6, 4, Tightening::UB ),
-            Tightening( 7, -4, Tightening::LB ), Tightening( 7, 10, Tightening::UB ),
-
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 10, Tightening::UB ),
-
-            Tightening( 10, -15, Tightening::LB ), Tightening( 10, 0, Tightening::UB ),
-            Tightening( 11, 2, Tightening::LB ), Tightening( 11, 27, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
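The absolute-value expectations follow the standard interval rule: a source range straddling zero maps to [0, max( |lb|, |ub| )], e.g. [-5, 2] -> [0, 5] for variable 4 above. A minimal sketch (illustrative helper):

    #include <algorithm>

    // Illustrative only: interval image of |x|.
    void absBounds( double lb, double ub, double &outLb, double &outUb )
    {
        if ( lb >= 0 )      { outLb = lb;  outUb = ub;  }
        else if ( ub <= 0 ) { outLb = -ub; outUb = -lb; }
        else                { outLb = 0;   outUb = std::max( -lb, ub ); }
    }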
tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 15, Tightening::UB ), - - Tightening( 11, -13, Tightening::LB ), Tightening( 11, 18, Tightening::UB ), - Tightening( 12, -5, Tightening::LB ), Tightening( 12, 25, Tightening::UB ), - Tightening( 13, -7, Tightening::LB ), Tightening( 13, 15, Tightening::UB ), - - Tightening( 14, 0, Tightening::LB ), Tightening( 14, 18, Tightening::UB ), - Tightening( 15, 0, Tightening::LB ), Tightening( 15, 25, Tightening::UB ), - Tightening( 16, 0, Tightening::LB ), Tightening( 16, 15, Tightening::UB ), - - Tightening( 17, -33, Tightening::LB ), Tightening( 17, 25, Tightening::UB ), - Tightening( 18, 0, Tightening::LB ), Tightening( 18, 58, Tightening::UB ), - - Tightening( 19, 0, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), - Tightening( 20, 0, Tightening::LB ), Tightening( 20, 58, Tightening::UB ), - - Tightening( 21, -59, Tightening::LB ), Tightening( 21, 32, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PMNR with random neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_pmnr_random_round() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - 
"backward-pmnr-random" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardRound( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), - Tightening( 7, -4, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 6.5, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PMNR with random neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 11, -4, Tightening::LB ), - Tightening( 11, 6, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, -3, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), - Tightening( 7, -10.5, Tightening::LB ), Tightening( 7, 5.5, Tightening::UB ), - - Tightening( 8, -3, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, -10, Tightening::LB ), Tightening( 
-
-            Tightening( 10, -3, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-            Tightening( 11, -15.5, Tightening::LB ), Tightening( 11, 11.5, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 7, -10, Tightening::LB ),
-            Tightening( 7, 5, Tightening::UB ),
-
-            Tightening( 11, -14, Tightening::LB ),
-            Tightening( 11, 11, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_random_round2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-random" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardRound2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-            Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
-            Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-
-            Tightening( 11, -11, Tightening::LB ), Tightening( 11, 15, Tightening::UB ),
-            Tightening( 12, -10, Tightening::LB ), Tightening( 12, 10, Tightening::UB ),
-            Tightening( 13, -7, Tightening::LB ), Tightening( 13, 3, Tightening::UB ),
-
-            Tightening( 14, -11, Tightening::LB ), Tightening( 14, 15, Tightening::UB ),
-            Tightening( 15, -10, Tightening::LB ), Tightening( 15, 10, Tightening::UB ),
-            Tightening( 16, -7, Tightening::LB ), Tightening( 16, 3, Tightening::UB ),
-
-            Tightening( 17, -27.5, Tightening::LB ), Tightening( 17, 27.5, Tightening::UB ),
-            Tightening( 18, -16.5, Tightening::LB ), Tightening( 18, 16.5, Tightening::UB ),
-
-            Tightening( 19, -28, Tightening::LB ), Tightening( 19, 28, Tightening::UB ),
-            Tightening( 20, -17, Tightening::LB ), Tightening( 20, 17, Tightening::UB ),
-
-            Tightening( 21, -38, Tightening::LB ), Tightening( 21, 36, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
-            Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-
-            Tightening( 11, -13, Tightening::LB ), Tightening( 11, 30, Tightening::UB ),
-            Tightening( 12, -23, Tightening::LB ), Tightening( 12, 12, Tightening::UB ),
-            Tightening( 13, -9, Tightening::LB ), Tightening( 13, 6, Tightening::UB ),
-
-            Tightening( 14, -13, Tightening::LB ), Tightening( 14, 30, Tightening::UB ),
-            Tightening( 15, -23, Tightening::LB ), Tightening( 15, 12, Tightening::UB ),
-            Tightening( 16, -9, Tightening::LB ), Tightening( 16, 6, Tightening::UB ),
-
-            Tightening( 17, -57.5, Tightening::LB ), Tightening( 17, 32.5, Tightening::UB ),
-            Tightening( 18, -23.5, Tightening::LB ), Tightening( 18, 26.5, Tightening::UB ),
-
-            Tightening( 19, -58, Tightening::LB ), Tightening( 19, 33, Tightening::UB ),
-            Tightening( 20, -24, Tightening::LB ), Tightening( 20, 27, Tightening::UB ),
-
-            Tightening( 21, -74, Tightening::LB ), Tightening( 21, 44, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_random_sign()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-random" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSign( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ),
-
-            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-
-            Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ),
-            Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ),
-
-            Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ),
-
-            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-
-            Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ),
-            Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_random_sign2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-random" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSign2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-            Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ),
-
-            Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ),
-            Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ),
-            Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ),
-
-            Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ),
-            Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ),
-            Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ),
-
-            Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ),
-            Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ),
-
-            Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ),
-            Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ),
-
-            Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-            Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ),
-
-            Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ),
-            Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ),
-            Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ),
-
-            Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ),
-            Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ),
-            Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ),
-
-            Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ),
-            Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ),
-
-            Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ),
-            Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ),
-
-            Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_random_leaky_relu()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-random" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardLeakyReLU( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
-
-            Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ),
-            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-
-            Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ),
-            Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 4, -0.1, Tightening::LB ),
-
-            Tightening( 8, -0.045, Tightening::LB ),
-            Tightening( 9, -0.3, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-
-            Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ),
-            Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ),
-
-            Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ),
-            Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ),
-
-            Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ),
-            Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 4, -0.5, Tightening::LB ),
-            Tightening( 5, -0.4, Tightening::LB ),
-
-            Tightening( 8, -0.4571, Tightening::LB ),
-            Tightening( 9, -1.1057, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_random_leaky_relu2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-random" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardLeakyRelu2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-            Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
-            Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-
-            Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ),
-            Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ),
-            Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ),
-
-            Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ),
-            Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ),
-            Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ),
-
-            Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ),
-            Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ),
-
-            Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ),
-            Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ),
-
-            Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 7, -0.2, Tightening::LB ),
-            Tightening( 8, -0.3, Tightening::LB ),
-            Tightening( 9, -0.3, Tightening::LB ),
-            Tightening( 10, -0.6, Tightening::LB ),
-
-            Tightening( 14, -0.9, Tightening::LB ),
-            Tightening( 15, -0.89, Tightening::LB ),
-            Tightening( 16, -0.77, Tightening::LB ),
-
-            Tightening( 19, -2.3133, Tightening::LB ),
-            Tightening( 20, -1.2, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
-            Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-
-            Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ),
-            Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ),
-            Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ),
-
-            Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ),
-            Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ),
-            Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ),
-
-            Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ),
-            Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ),
-
-            Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ),
-            Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ),
-
-            Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 7, -0.2, Tightening::LB ),
-            Tightening( 8, -0.5, Tightening::LB ),
-            Tightening( 9, -0.6, Tightening::LB ),
-            Tightening( 10, -1.5, Tightening::LB ),
-
-            Tightening( 14, -1.1, Tightening::LB ),
-            Tightening( 15, -2.1771, Tightening::LB ),
-            Tightening( 16, -1.15, Tightening::LB ),
-
-            Tightening( 19, -5.6259, Tightening::LB ),
-            Tightening( 20, -1.9, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_random_softmax_and_max()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-random" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSoftmaxAndMax( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke SBT
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
-
-            Tightening( 3, 0.2119, Tightening::LB ), Tightening( 3, 0.8756, Tightening::UB ),
-            Tightening( 5, 0.0049, Tightening::LB ), Tightening( 5, 0.6652, Tightening::UB ),
-            Tightening( 7, 0.0634, Tightening::LB ), Tightening( 7, 0.4955, Tightening::UB ),
-
-            Tightening( 8, -0.1870, Tightening::LB ), Tightening( 8, 1.4488, Tightening::UB ),
-            Tightening( 9, 0.6814, Tightening::LB ), Tightening( 9, 2.2432, Tightening::UB ),
-
-            Tightening( 10, 0.6814, Tightening::LB ), Tightening( 10, 2.2432, Tightening::UB ),
-
-            Tightening( 11, -2.2432, Tightening::LB ), Tightening( 11, -0.6814, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-
-            Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ),
-            Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ),
-
-            Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ),
-            Tightening( 9, 0.2194, Tightening::LB ), Tightening( 9, 2.9934, Tightening::UB ),
-
-            Tightening( 10, 0.2194, Tightening::LB ), Tightening( 10, 2.9934, Tightening::UB ),
-
-            Tightening( 11, -2.9934, Tightening::LB ), Tightening( 11, -0.2194, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_random_softmax_and_max2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-random" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSoftmaxAndMax2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, 0.0003, Tightening::LB ), Tightening( 7, 0.9864, Tightening::UB ),
-            Tightening( 8, 0.0001, Tightening::LB ), Tightening( 8, 0.9907, Tightening::UB ),
-            Tightening( 9, 0.0001, Tightening::LB ), Tightening( 9, 0.9907, Tightening::UB ),
-            Tightening( 10, 0.0001, Tightening::LB ), Tightening( 10, 0.9994, Tightening::UB ),
-
-            Tightening( 11, 1.0013, Tightening::LB ), Tightening( 11, 4.9634, Tightening::UB ),
-            Tightening( 12, -0.9861, Tightening::LB ), Tightening( 12, 2.9152, Tightening::UB ),
-            Tightening( 13, -2.9868, Tightening::LB ), Tightening( 13, 0.9678, Tightening::UB ),
-
-            Tightening( 14, 0.1143, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ),
-            Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8694, Tightening::UB ),
-            Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4596, Tightening::UB ),
-
-            Tightening( 17, 0.1143, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, 0.0001, Tightening::LB ), Tightening( 7, 0.9999, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 0.9991, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 0.9990, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9999, Tightening::UB ),
-
-            Tightening( 11, 1.0003, Tightening::LB ), Tightening( 11, 4.9326, Tightening::UB ),
-            Tightening( 12, -0.9999, Tightening::LB ), Tightening( 12, 2.9979, Tightening::UB ),
-            Tightening( 13, -2.9990, Tightening::LB ), Tightening( 13, 0.9980, Tightening::UB ),
-
-            Tightening( 14, 0.1067, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ),
-            Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8786, Tightening::UB ),
-            Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4677, Tightening::UB ),
-
-            Tightening( 17, 0.1067, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_random_relu_and_bilinear()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-random" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardReluAndBilinear( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
-
-            Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ),
-
-            Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ),
-
-            Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-
-            Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ),
-            Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ),
-
-            Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ),
-
-            Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 7, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_random_relu_and_bilinear2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-random" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardReluAndBilinear2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-
-            Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ),
-            Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ),
-
-            Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ),
-            Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ),
-
-            Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 13, 0, Tightening::LB ),
-            Tightening( 14, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-
-            Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ),
-            Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ),
-
-            Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ),
-            Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ),
-
-            Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 7, 0, Tightening::LB ),
-
-            Tightening( 13, 0, Tightening::LB ),
-            Tightening( 14, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-
-    void test_pmnr_gradient_relu()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-gradient" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardReLU( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
-
-            Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-
-            Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ),
-            Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 8, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-
-            Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ),
-            Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ),
-
-            Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ),
-
-            Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ),
-            Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 8, 0, Tightening::LB ),
-            Tightening( 9, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_gradient_relu2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-gradient" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardReLU2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), - Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ), - - Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ), - Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), - Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ), - - Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ), - Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ), - - Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ), - Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ), - - Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PMNR with gradient heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 14, 0, Tightening::LB ), - Tightening( 15, 0, Tightening::LB ), - Tightening( 16, 0, Tightening::LB ), - - Tightening( 19, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( 
nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), - Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), - Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ), - - Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ), - Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ), - Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ), - - Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ), - Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ), - - Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ), - Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ), - - Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PMNR with gradient heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 7, 0, Tightening::LB ), - - Tightening( 14, 0, Tightening::LB ), - Tightening( 15, 0, Tightening::LB ), - Tightening( 16, 0, Tightening::LB ), - - Tightening( 20, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_pmnr_gradient_sigmoid() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-pmnr-gradient" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSigmoid( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, 0.2689, Tightening::LB ), Tightening( 4, 0.7311, Tightening::UB ), - Tightening( 5, 0.5, Tightening::LB ), Tightening( 5, 0.8808, Tightening::UB ), - - Tightening( 6, -0.1261, Tightening::LB ), Tightening( 6, 0.5069, Tightening::UB ), - Tightening( 7, -0.2379, Tightening::LB ), Tightening( 7, 0.8571, Tightening::UB ), - - 
Tightening( 8, 0.4685, Tightening::LB ), Tightening( 8, 0.6241, Tightening::UB ), - Tightening( 9, 0.4408, Tightening::LB ), Tightening( 9, 0.7021, Tightening::UB ), - - Tightening( 10, -1.1819, Tightening::LB ), Tightening( 10, -1.0535, Tightening::UB ), - Tightening( 11, 3.4986, Tightening::LB ), Tightening( 11, 3.8797, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PMNR with gradient heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, 0.0066, Tightening::LB ), Tightening( 4, 0.8807, Tightening::UB ), - Tightening( 5, 0.0179, Tightening::LB ), Tightening( 5, 0.9526, Tightening::UB ), - - Tightening( 6, -0.8362, Tightening::LB ), Tightening( 6, 0.9193, Tightening::UB ), - Tightening( 7, -0.8860, Tightening::LB ), Tightening( 7, 1.6904, Tightening::UB ), - - Tightening( 8, 0.3023, Tightening::LB ), Tightening( 8, 0.7148, Tightening::UB ), - Tightening( 9, 0.2919, Tightening::LB ), Tightening( 9, 0.8443, Tightening::UB ), - - Tightening( 10, -1.2694, Tightening::LB ), Tightening( 10, -0.8841, Tightening::UB ), - Tightening( 11, 3.2396, Tightening::LB ), Tightening( 11, 4.05, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PMNR with gradient heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_pmnr_gradient_sigmoid2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( 
Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-pmnr-gradient" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSigmoid2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), - Tightening( 8, 0.0474, Tightening::LB ), Tightening( 8, 0.9526, Tightening::UB ), - Tightening( 9, 0.0474, Tightening::LB ), Tightening( 9, 0.9526, Tightening::UB ), - Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9975, Tightening::UB ), - - Tightening( 11, 1.3787, Tightening::LB ), Tightening( 11, 4.6213, Tightening::UB ), - Tightening( 12, -0.5636, Tightening::LB ), Tightening( 12, 2.5636, Tightening::UB ), - Tightening( 13, -2.3771, Tightening::LB ), Tightening( 13, 0.3771, Tightening::UB ), - - Tightening( 14, 0.7988, Tightening::LB ), Tightening( 14, 0.9903, Tightening::UB ), - Tightening( 15, 0.3627, Tightening::LB ), Tightening( 15, 0.9285, Tightening::UB ), - Tightening( 16, 0.0849, Tightening::LB ), Tightening( 16, 0.5932, Tightening::UB ), - - Tightening( 17, -1.2113, Tightening::LB ), Tightening( 17, 0.0354, Tightening::UB ), - Tightening( 18, 1.4027, Tightening::LB ), Tightening( 18, 2.4177, Tightening::UB ), - - Tightening( 19, 0.2295, Tightening::LB ), Tightening( 19, 0.5088, Tightening::UB ), - Tightening( 20, 0.8026, Tightening::LB ), Tightening( 20, 0.9182, Tightening::UB ), - - Tightening( 21, -1.6539, Tightening::LB ), Tightening( 21, -1.3393, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PMNR with gradient heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 
10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.9933, Tightening::UB ), - Tightening( 8, 0.0067, Tightening::LB ), Tightening( 8, 0.9933, Tightening::UB ), - Tightening( 9, 0.0025, Tightening::LB ), Tightening( 9, 0.9933, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9991, Tightening::UB ), - - Tightening( 11, 1.2517, Tightening::LB ), Tightening( 11, 4.9701, Tightening::UB ), - Tightening( 12, -0.9599, Tightening::LB ), Tightening( 12, 2.8466, Tightening::UB ), - Tightening( 13, -2.8147, Tightening::LB ), Tightening( 13, 0.9188, Tightening::UB ), - - Tightening( 14, 0.7776, Tightening::LB ), Tightening( 14, 0.9931, Tightening::UB ), - Tightening( 15, 0.2769, Tightening::LB ), Tightening( 15, 0.9451, Tightening::UB ), - Tightening( 16, 0.0565, Tightening::LB ), Tightening( 16, 0.7148, Tightening::UB ), - - Tightening( 17, -1.4307, Tightening::LB ), Tightening( 17, 0.1083, Tightening::UB ), - Tightening( 18, 1.2592, Tightening::LB ), Tightening( 18, 2.5519, Tightening::UB ), - - Tightening( 19, 0.1929, Tightening::LB ), Tightening( 19, 0.5270, Tightening::UB ), - Tightening( 20, 0.7789, Tightening::LB ), Tightening( 20, 0.9277, Tightening::UB ), - - Tightening( 21, -1.6967, Tightening::LB ), Tightening( 21, -1.3115, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PMNR with gradient heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_pmnr_gradient_abs() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-pmnr-gradient" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardAbs( nlr, 
tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), - - Tightening( 10, -4, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, 2, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PMNR with gradient heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 4, Tightening::UB ), - - Tightening( 6, -5, Tightening::LB ), Tightening( 6, 4, Tightening::UB ), - Tightening( 7, -4, Tightening::LB ), Tightening( 7, 10, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 10, Tightening::UB ), - - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, 2, Tightening::LB ), Tightening( 11, 27, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( 
nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PMNR with gradient heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_pmnr_gradient_abs2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-pmnr-gradient" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardAbs2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -4, Tightening::LB ), Tightening( 11, 9, Tightening::UB ), - Tightening( 12, -2, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), - Tightening( 13, -5, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), - - Tightening( 14, 0, Tightening::LB ), Tightening( 14, 9, Tightening::UB ), - Tightening( 15, 0, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), - Tightening( 16, 0, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), - - Tightening( 17, -15, Tightening::LB ), Tightening( 17, 12, Tightening::UB ), - Tightening( 18, 0, Tightening::LB ), Tightening( 18, 27, Tightening::UB ), - - Tightening( 19, 0, Tightening::LB ), Tightening( 19, 15, Tightening::UB ), - Tightening( 20, 0, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), - - Tightening( 21, -28, Tightening::LB ), Tightening( 21, 14, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PMNR with gradient heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - 
tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 15, Tightening::UB ), - - Tightening( 11, -13, Tightening::LB ), Tightening( 11, 18, Tightening::UB ), - Tightening( 12, -5, Tightening::LB ), Tightening( 12, 25, Tightening::UB ), - Tightening( 13, -7, Tightening::LB ), Tightening( 13, 15, Tightening::UB ), - - Tightening( 14, 0, Tightening::LB ), Tightening( 14, 18, Tightening::UB ), - Tightening( 15, 0, Tightening::LB ), Tightening( 15, 25, Tightening::UB ), - Tightening( 16, 0, Tightening::LB ), Tightening( 16, 15, Tightening::UB ), - - Tightening( 17, -33, Tightening::LB ), Tightening( 17, 25, Tightening::UB ), - Tightening( 18, 0, Tightening::LB ), Tightening( 18, 58, Tightening::UB ), - - Tightening( 19, 0, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), - Tightening( 20, 0, Tightening::LB ), Tightening( 20, 58, Tightening::UB ), - - Tightening( 21, -59, Tightening::LB ), Tightening( 21, 32, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PMNR with gradient heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( 
bounds, expectedBounds4 ) ); - } - - void test_pmnr_gradient_round() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-pmnr-gradient" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardRound( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), - Tightening( 7, -4, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 6.5, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PMNR with gradient heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 11, -4, Tightening::LB ), - Tightening( 11, 6, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, -3, Tightening::LB ), 
Tightening( 6, 5, Tightening::UB ), - Tightening( 7, -10.5, Tightening::LB ), Tightening( 7, 5.5, Tightening::UB ), - - Tightening( 8, -3, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, -10, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), - - Tightening( 10, -3, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - Tightening( 11, -15.5, Tightening::LB ), Tightening( 11, 11.5, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PMNR with gradient heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 7, -10, Tightening::LB ), - Tightening( 7, 5, Tightening::UB ), - - Tightening( 11, -14, Tightening::LB ), - Tightening( 11, 11, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_pmnr_gradient_round2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-pmnr-gradient" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardRound2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -11, Tightening::LB ), Tightening( 11, 15, Tightening::UB ), - Tightening( 12, -10, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), - Tightening( 13, -7, Tightening::LB ), Tightening( 13, 3, Tightening::UB ), - - Tightening( 14, -11, Tightening::LB ), Tightening( 14, 15, Tightening::UB ), - Tightening( 15, -10, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), - Tightening( 16, -7, Tightening::LB ), Tightening( 16, 3, Tightening::UB ), - - Tightening( 17, -27.5, Tightening::LB ), Tightening( 17, 27.5, Tightening::UB ), - Tightening( 18, -16.5, Tightening::LB ), Tightening( 18, 16.5, Tightening::UB ), - - Tightening( 19, -28, Tightening::LB ), Tightening( 19, 28, Tightening::UB ), - Tightening( 20, -17, Tightening::LB ), Tightening( 20, 17, Tightening::UB ), - - Tightening( 21, -38, Tightening::LB ), Tightening( 21, 36, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PMNR with gradient heuristic for 
neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -13, Tightening::LB ), Tightening( 11, 30, Tightening::UB ), - Tightening( 12, -23, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), - Tightening( 13, -9, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), - - Tightening( 14, -13, Tightening::LB ), Tightening( 14, 30, Tightening::UB ), - Tightening( 15, -23, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), - Tightening( 16, -9, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), - - Tightening( 17, -57.5, Tightening::LB ), Tightening( 17, 32.5, Tightening::UB ), - Tightening( 18, -23.5, Tightening::LB ), Tightening( 18, 26.5, Tightening::UB ), - - Tightening( 19, -58, Tightening::LB ), Tightening( 19, 33, 
Tightening::UB ), - Tightening( 20, -24, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), - - Tightening( 21, -74, Tightening::LB ), Tightening( 21, 44, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PMNR with gradient heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_pmnr_gradient_sign() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-pmnr-gradient" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSign( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PMNR with gradient heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 
10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PMNR with gradient heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_pmnr_gradient_sign2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-pmnr-gradient" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSign2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), - Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), - Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), - - Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), - Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), - Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), - - Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), - Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), - - Tightening( 19, -1, 
Tightening::LB ), Tightening( 19, 1, Tightening::UB ), - Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), - - Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PMNR with gradient heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), - Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), - Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), - - Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, 
Tightening::UB ), - Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), - Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), - - Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), - Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), - - Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), - Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), - - Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PMNR with gradient heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_pmnr_gradient_leaky_relu() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-pmnr-gradient" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardLeakyReLU( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PMNR with gradient heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 4, -0.1, Tightening::LB ), - - Tightening( 8, -0.045, Tightening::LB ), - Tightening( 9, -0.3, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - 
tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), - Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), - - Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), - Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), - - Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), - Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PMNR with gradient heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 4, -0.5, Tightening::LB ), - Tightening( 5, -0.4, Tightening::LB ), - - Tightening( 8, -0.4571, Tightening::LB ), - Tightening( 9, -1.1057, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_pmnr_gradient_leaky_relu2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-pmnr-gradient" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardLeakyRelu2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 
-3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ), - Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ), - Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ), - - Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ), - Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ), - Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ), - - Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ), - Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ), - - Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ), - Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ), - - Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PMNR with gradient heuristic for neuron selection - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 7, -0.2, Tightening::LB ), - Tightening( 8, -0.3, Tightening::LB ), - Tightening( 9, -0.3, Tightening::LB ), - Tightening( 10, -0.6, Tightening::LB ), - - Tightening( 14, -0.9, Tightening::LB ), - Tightening( 15, -0.89, Tightening::LB ), - Tightening( 16, -0.77, Tightening::LB ), - - Tightening( 19, -2.3133, Tightening::LB ), - Tightening( 20, -1.2, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
-            Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-
-            Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ),
-            Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ),
-            Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ),
-
-            Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ),
-            Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ),
-            Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ),
-
-            Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ),
-            Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ),
-
-            Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ),
-            Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ),
-
-            Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 7, -0.2, Tightening::LB ),
-            Tightening( 8, -0.5, Tightening::LB ),
-            Tightening( 9, -0.6, Tightening::LB ),
-            Tightening( 10, -1.5, Tightening::LB ),
-
-            Tightening( 14, -1.1, Tightening::LB ),
-            Tightening( 15, -2.1771, Tightening::LB ),
-            Tightening( 16, -1.15, Tightening::LB ),
-
-            Tightening( 19, -5.6259, Tightening::LB ),
-            Tightening( 20, -1.9, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_gradient_softmax_and_max()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-gradient" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSoftmaxAndMax( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke SBT
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
-
-            Tightening( 3, 0.2119, Tightening::LB ), Tightening( 3, 0.8756, Tightening::UB ),
-            Tightening( 5, 0.0049, Tightening::LB ), Tightening( 5, 0.6652, Tightening::UB ),
-            Tightening( 7, 0.0634, Tightening::LB ), Tightening( 7, 0.4955, Tightening::UB ),
-
-            Tightening( 8, -0.1870, Tightening::LB ), Tightening( 8, 1.4488, Tightening::UB ),
-            Tightening( 9, 0.6814, Tightening::LB ), Tightening( 9, 2.2432, Tightening::UB ),
-
-            Tightening( 10, 0.6814, Tightening::LB ), Tightening( 10, 2.2432, Tightening::UB ),
-
-            Tightening( 11, -2.2432, Tightening::LB ), Tightening( 11, -0.6814, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-
-            Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ),
-            Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ),
-
-            Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ),
-            Tightening( 9, 0.2194, Tightening::LB ), Tightening( 9, 2.9934, Tightening::UB ),
-
-            Tightening( 10, 0.2194, Tightening::LB ), Tightening( 10, 2.9934, Tightening::UB ),
-
-            Tightening( 11, -2.9934, Tightening::LB ), Tightening( 11, -0.2194, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_gradient_softmax_and_max2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-gradient" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSoftmaxAndMax2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, 0.0003, Tightening::LB ), Tightening( 7, 0.9864, Tightening::UB ),
-            Tightening( 8, 0.0001, Tightening::LB ), Tightening( 8, 0.9907, Tightening::UB ),
-            Tightening( 9, 0.0001, Tightening::LB ), Tightening( 9, 0.9907, Tightening::UB ),
-            Tightening( 10, 0.0001, Tightening::LB ), Tightening( 10, 0.9994, Tightening::UB ),
-
-            Tightening( 11, 1.0013, Tightening::LB ), Tightening( 11, 4.9634, Tightening::UB ),
-            Tightening( 12, -0.9861, Tightening::LB ), Tightening( 12, 2.9152, Tightening::UB ),
-            Tightening( 13, -2.9868, Tightening::LB ), Tightening( 13, 0.9678, Tightening::UB ),
-
-            Tightening( 14, 0.1143, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ),
-            Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8694, Tightening::UB ),
-            Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4596, Tightening::UB ),
-
-            Tightening( 17, 0.1143, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, 0.0001, Tightening::LB ), Tightening( 7, 0.9999, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 0.9991, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 0.9990, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9999, Tightening::UB ),
-
-            Tightening( 11, 1.0003, Tightening::LB ), Tightening( 11, 4.9326, Tightening::UB ),
-            Tightening( 12, -0.9999, Tightening::LB ), Tightening( 12, 2.9979, Tightening::UB ),
-            Tightening( 13, -2.9990, Tightening::LB ), Tightening( 13, 0.9980, Tightening::UB ),
-
-            Tightening( 14, 0.1067, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ),
-            Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8786, Tightening::UB ),
-            Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4677, Tightening::UB ),
-
-            Tightening( 17, 0.1067, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_gradient_relu_and_bilinear()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-gradient" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardReluAndBilinear( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
-
-            Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ),
-
-            Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ),
-
-            Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-
-            Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ),
-            Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ),
-
-            Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ),
-
-            Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 7, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_gradient_relu_and_bilinear2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-gradient" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardReluAndBilinear2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-
-            Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ),
-            Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ),
-
-            Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ),
-            Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ),
-
-            Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 13, 0, Tightening::LB ),
-            Tightening( 14, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-
-            Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ),
-            Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ),
-
-            Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ),
-            Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ),
-
-            Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with gradient heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 7, 0, Tightening::LB ),
-
-            Tightening( 13, 0, Tightening::LB ),
-            Tightening( 14, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_bbps_relu()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-bbps" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardReLU( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-
-            Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
-
-            Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-
-            Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ),
-            Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 8, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-
-            Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ),
-            Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ),
-
-            Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ),
-
-            Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ),
-            Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 8, 0, Tightening::LB ),
-            Tightening( 9, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_bbps_relu2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-bbps" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardReLU2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-
-            Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ),
-            Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ),
-            Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ),
-
-            Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ),
-            Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ),
-            Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ),
-
-            Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ),
-            Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ),
-
-            Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ),
-            Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ),
-
-            Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 14, 0, Tightening::LB ),
-            Tightening( 15, 0, Tightening::LB ),
-            Tightening( 16, 0, Tightening::LB ),
-
-            Tightening( 19, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-
-            Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ),
-            Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ),
-            Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ),
-
-            Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ),
-            Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ),
-            Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ),
-
-            Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ),
-            Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ),
-
-            Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ),
-            Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ),
-
-            Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {
-            Tightening( 7, 0, Tightening::LB ),
-
-            Tightening( 14, 0, Tightening::LB ),
-            Tightening( 15, 0, Tightening::LB ),
-            Tightening( 16, 0, Tightening::LB ),
-
-            Tightening( 20, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_bbps_sigmoid()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-bbps" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardSigmoid( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkWithAbsAndRelu( nlr, tableau );
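+        // Set bounds for the two input variables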
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 1 );

         // Invoke DeepPoly
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );

         List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+            Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
+            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),

-            Tightening( 4, 0.2689, Tightening::LB ), Tightening( 4, 0.7311, Tightening::UB ),
-            Tightening( 5, 0.5, Tightening::LB ), Tightening( 5, 0.8808, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ),
+            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),

-            Tightening( 6, -0.1261, Tightening::LB ), Tightening( 6, 0.5069, Tightening::UB ),
-            Tightening( 7, -0.2379, Tightening::LB ), Tightening( 7, 0.8571, Tightening::UB ),
+            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ),
+            Tightening( 9, -5, Tightening::LB ), Tightening( 9, 7, Tightening::UB ),

-            Tightening( 8, 0.4685, Tightening::LB ), Tightening( 8, 0.6241, Tightening::UB ),
-            Tightening( 9, 0.4408, Tightening::LB ), Tightening( 9, 0.7021, Tightening::UB ),
+            Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
+            Tightening( 11, -5, Tightening::LB ), Tightening( 11, 7, Tightening::UB ),

-            Tightening( 10, -1.1819, Tightening::LB ), Tightening( 10, -1.0535, Tightening::UB ),
-            Tightening( 11, 3.4986, Tightening::LB ), Tightening( 11, 3.8797, Tightening::UB ),
+            Tightening( 12, -1, Tightening::LB ), Tightening( 12, 7, Tightening::UB ),
+            Tightening( 13, -14, Tightening::LB ), Tightening( 13, 26.25, Tightening::UB ),
         } );

-        List<Tightening> bounds;
+        List<Tightening> bounds, newBounds;
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds ) );

-
-        // Invoke PMNR with BBPS heuristic for neuron selection
+        // Invoke backward LP propagation
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );

-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
-
-            Tightening( 4, 0.0066, Tightening::LB ), Tightening( 4, 0.8807, Tightening::UB ),
-            Tightening( 5, 0.0179, Tightening::LB ), Tightening( 5, 0.9526, Tightening::UB ),
-
-            Tightening( 6, -0.8362, Tightening::LB ), Tightening( 6, 0.9193, Tightening::UB ),
-            Tightening( 7, -0.8860, Tightening::LB ), Tightening( 7, 1.6904, Tightening::UB ),
-
-            Tightening( 8, 0.3023, Tightening::LB ), Tightening( 8, 0.7148, Tightening::UB ),
-            Tightening( 9, 0.2919, Tightening::LB ), Tightening( 9, 0.8443, Tightening::UB ),
-
-            Tightening( 10, -1.2694, Tightening::LB ), Tightening( 10, -0.8841, Tightening::UB ),
-            Tightening( 11, 3.2396, Tightening::LB ), Tightening( 11, 4.05, Tightening::UB ),
+        List<Tightening> expectedBounds2( {
+            Tightening( 10, 0, Tightening::LB ),
+            Tightening( 11, 0, Tightening::LB ),
         } );

-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
+        bounds = removeRedundancies( newBounds );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );

-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
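+        // No infeasible branches are expected for this query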
+        List> infeasibleBranches( {} );
+        List> expectedInfeasibleBranches( {} );
+        TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
+        TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
     }

-    void test_pmnr_bbps_sigmoid2()
+    void test_backward_round_and_sign()
     {
         Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
         Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-bbps" );
+                                   "backward-converge" );

         NLR::NetworkLevelReasoner nlr;
         MockTableau tableau;
         nlr.setTableau( &tableau );
-        populateNetworkBackwardSigmoid2( nlr, tableau );
+        populateNetworkWithRoundAndSign( nlr, tableau );
         tableau.setLowerBound( 0, -1 );
         tableau.setUpperBound( 0, 1 );
         tableau.setLowerBound( 1, -1 );
         tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ),
-            Tightening( 8, 0.0474, Tightening::LB ), Tightening( 8, 0.9526, Tightening::UB ),
-            Tightening( 9, 0.0474, Tightening::LB ), Tightening( 9, 0.9526, Tightening::UB ),
-            Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9975, Tightening::UB ),
-
-            Tightening( 11, 1.3787, Tightening::LB ), Tightening( 11, 4.6213, Tightening::UB ),
-            Tightening( 12, -0.5636, Tightening::LB ), Tightening( 12, 2.5636, Tightening::UB ),
-            Tightening( 13, -2.3771, Tightening::LB ), Tightening( 13, 0.3771, Tightening::UB ),
-
-            Tightening( 14, 0.7988, Tightening::LB ), Tightening( 14, 0.9903, Tightening::UB ),
-            Tightening( 15, 0.3627, Tightening::LB ), Tightening( 15, 0.9285, Tightening::UB ),
-            Tightening( 16, 0.0849, Tightening::LB ), Tightening( 16, 0.5932, Tightening::UB ),
-
-            Tightening( 17, -1.2113, Tightening::LB ), Tightening( 17, 0.0354, Tightening::UB ),
-            Tightening( 18, 1.4027, Tightening::LB ), Tightening( 18, 2.4177, Tightening::UB ),
-
-            Tightening( 19, 0.2295, Tightening::LB ), Tightening( 19, 0.5088, Tightening::UB ),
-            Tightening( 20, 0.8026, Tightening::LB ), Tightening( 20, 0.9182, Tightening::UB ),
-
-            Tightening( 21, -1.6539, Tightening::LB ), Tightening( 21, -1.3393, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
-
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.9933, Tightening::UB ),
-            Tightening( 8, 0.0067, Tightening::LB ), Tightening( 8, 0.9933, Tightening::UB ),
-            Tightening( 9, 0.0025, Tightening::LB ), Tightening( 9, 0.9933, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9991, Tightening::UB ),
-
-            Tightening( 11, 1.2517, Tightening::LB ), Tightening( 11, 4.9701, Tightening::UB ),
-            Tightening( 12, -0.9599, Tightening::LB ), Tightening( 12, 2.8466, Tightening::UB ),
-            Tightening( 13, -2.8147, Tightening::LB ), Tightening( 13, 0.9188, Tightening::UB ),
-
-            Tightening( 14, 0.7776, Tightening::LB ), Tightening( 14, 0.9931, Tightening::UB ),
-            Tightening( 15, 0.2769, Tightening::LB ), Tightening( 15, 0.9451, Tightening::UB ),
-            Tightening( 16, 0.0565, Tightening::LB ), Tightening( 16, 0.7148, Tightening::UB ),
-
-            Tightening( 17, -1.4307, Tightening::LB ), Tightening( 17, 0.1083, Tightening::UB ),
-            Tightening( 18, 1.2592, Tightening::LB ), Tightening( 18, 2.5519, Tightening::UB ),
-
-            Tightening( 19, 0.1929, Tightening::LB ), Tightening( 19, 0.5270, Tightening::UB ),
-            Tightening( 20, 0.7789, Tightening::LB ), Tightening( 20, 0.9277, Tightening::UB ),
-
-            Tightening( 21, -1.6967, Tightening::LB ), Tightening( 21, -1.3115, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds4( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
-    }
-
-    void test_pmnr_bbps_abs()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-bbps" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkBackwardAbs( nlr, tableau );
-
-        tableau.setLowerBound( 0, 0 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
-        tableau.setUpperBound( 1, 1 );
-
         // Invoke DeepPoly
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );

         List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+            Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
+            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),

-            Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+            Tightening( 6, -5, Tightening::LB ), Tightening( 6, 5, Tightening::UB ),
+            Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),

-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
+            Tightening( 8, -6, Tightening::LB ), Tightening( 8, 8, Tightening::UB ),
+            Tightening( 9, -5.5, Tightening::LB ), Tightening( 9, 7.5, Tightening::UB ),

-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ),
+            Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ),
+            Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ),

-            Tightening( 10, -4, Tightening::LB ), Tightening( 10, 0, Tightening::UB ),
-            Tightening( 11, 2, Tightening::LB ), Tightening( 11, 8, Tightening::UB ),
+            Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ),
+            Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ),
         } );

-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
+        List<Tightening> bounds, newBounds;
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
+
+        // Invoke backward LP propagation
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds2( {} );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
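+        // Filter out tightenings that do not improve on the current bounds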
+        bounds = removeRedundancies( newBounds );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+        List> infeasibleBranches( {} );
+        List> expectedInfeasibleBranches( {} );
+        TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
+        TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
+    }
+
+    void test_backward_leaky_relu_and_sigmoid()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+                                   "backward-converge" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkWithLeakyReluAndSigmoid( nlr, tableau );
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 1 );

         // Invoke DeepPoly
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );

-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
+        List<Tightening> expectedBounds( {
+            Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
+            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),

-            Tightening( 4, 0, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 4, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+            Tightening( 6, -5, Tightening::LB ), Tightening( 6, 5, Tightening::UB ),
+            Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),

-            Tightening( 6, -5, Tightening::LB ), Tightening( 6, 4, Tightening::UB ),
-            Tightening( 7, -4, Tightening::LB ), Tightening( 7, 10, Tightening::UB ),
+            Tightening( 8, -6, Tightening::LB ), Tightening( 8, 8, Tightening::UB ),
+            Tightening( 9, -4, Tightening::LB ), Tightening( 9, 6, Tightening::UB ),

-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 10, Tightening::UB ),
+            Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9997, Tightening::UB ),
+            Tightening( 11, 0.0180, Tightening::LB ), Tightening( 11, 0.9975, Tightening::UB ),

-            Tightening( 10, -15, Tightening::LB ), Tightening( 10, 0, Tightening::UB ),
-            Tightening( 11, 2, Tightening::LB ), Tightening( 11, 27, Tightening::UB ),
+            Tightening( 12, 0.0025, Tightening::LB ), Tightening( 12, 0.9997, Tightening::UB ),
+            Tightening( 13, 0.0564, Tightening::LB ), Tightening( 13, 3.9922, Tightening::UB ),
         } );

+        List<Tightening> bounds, newBounds;
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );

-        // Invoke PMNR with BBPS heuristic for neuron selection
+        // Invoke backward LP propagation
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );

-        List<Tightening> expectedBounds4( {} );
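+        // The LP pass is expected to tighten two more lower bounds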
+        List<Tightening> expectedBounds2( {
+            Tightening( 6, -0.5, Tightening::LB ),
+            Tightening( 7, -0.1, Tightening::LB ),
+        } );

-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
+        bounds = removeRedundancies( newBounds );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+        List> infeasibleBranches( {} );
+        List> expectedInfeasibleBranches( {} );
+        TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
+        TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
     }

-    void test_pmnr_bbps_abs2()
+    void test_backward_softmax_and_max()
     {
         Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
         Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-bbps" );
+                                   "backward-converge" );

         NLR::NetworkLevelReasoner nlr;
         MockTableau tableau;
         nlr.setTableau( &tableau );
-        populateNetworkBackwardAbs2( nlr, tableau );
+        populateNetworkWithSoftmaxAndMax( nlr, tableau );
         tableau.setLowerBound( 0, -1 );
         tableau.setUpperBound( 0, 1 );
         tableau.setLowerBound( 1, -1 );
         tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
         // Invoke DeepPoly
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );

         List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
+            Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
+            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),

-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
+            Tightening( 5, 0.0066, Tightening::LB ), Tightening( 5, 0.9517, Tightening::UB ),
+            Tightening( 6, 0.0007, Tightening::LB ), Tightening( 6, 0.9909, Tightening::UB ),
+            Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ),

-            Tightening( 11, -4, Tightening::LB ), Tightening( 11, 9, Tightening::UB ),
-            Tightening( 12, -2, Tightening::LB ), Tightening( 12, 12, Tightening::UB ),
-            Tightening( 13, -5, Tightening::LB ), Tightening( 13, 6, Tightening::UB ),
+            Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ),
+            Tightening( 9, 0.3192, Tightening::LB ), Tightening( 9, 2.9819, Tightening::UB ),

-            Tightening( 14, 0, Tightening::LB ), Tightening( 14, 9, Tightening::UB ),
-            Tightening( 15, 0, Tightening::LB ), Tightening( 15, 12, Tightening::UB ),
-            Tightening( 16, 0, Tightening::LB ), Tightening( 16, 6, Tightening::UB ),
+            Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ),

-            Tightening( 17, -15, Tightening::LB ), Tightening( 17, 12, Tightening::UB ),
-            Tightening( 18, 0, Tightening::LB ), Tightening( 18, 27, Tightening::UB ),
-
-            Tightening( 19, 0, Tightening::LB ), Tightening( 19, 15, Tightening::UB ),
-            Tightening( 20, 0, Tightening::LB ), Tightening( 20, 27, Tightening::UB ),
-
-            Tightening( 21, -28, Tightening::LB ), Tightening( 21, 14, Tightening::UB ),
+            Tightening( 11, -2.9819, Tightening::LB ), Tightening( 11, -0.3192, Tightening::UB ),
         } );

-        List<Tightening> bounds;
+        List<Tightening> bounds, newBounds;
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds ) );

-
-        // Invoke PMNR with BBPS heuristic for neuron selection
+        // Invoke backward LP propagation
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );

         List<Tightening> expectedBounds2( {} );

-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
+        bounds = removeRedundancies( newBounds );
         TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );

+        List> infeasibleBranches( {} );
+        List> expectedInfeasibleBranches( {} );
+        TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
+        TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
+    }

-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
+    void test_backward_relu_and_bilinear()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+                                   "backward-converge" );
 
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkWithReluAndBilinear( nlr, tableau );
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 1 );
 
         // Invoke DeepPoly
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
 
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 6, Tightening::UB ),
-            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 15, Tightening::UB ),
-
-            Tightening( 11, -13, Tightening::LB ), Tightening( 11, 18, Tightening::UB ),
-            Tightening( 12, -5, Tightening::LB ), Tightening( 12, 25, Tightening::UB ),
-            Tightening( 13, -7, Tightening::LB ), Tightening( 13, 15, Tightening::UB ),
+        List<Tightening> expectedBounds( {
+            Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
+            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 14, 0, Tightening::LB ), Tightening( 14, 18, Tightening::UB ),
-            Tightening( 15, 0, Tightening::LB ), Tightening( 15, 25, Tightening::UB ),
-            Tightening( 16, 0, Tightening::LB ), Tightening( 16, 15, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ),
+            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-            Tightening( 17, -33, Tightening::LB ), Tightening( 17, 25, Tightening::UB ),
-            Tightening( 18, 0, Tightening::LB ), Tightening( 18, 58, Tightening::UB ),
+            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ),
+            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
-            Tightening( 19, 0, Tightening::LB ), Tightening( 19, 33, Tightening::UB ),
-            Tightening( 20, 0, Tightening::LB ), Tightening( 20, 58, Tightening::UB ),
+            Tightening( 10, -7, Tightening::LB ), Tightening( 10, 35, Tightening::UB ),
-            Tightening( 21, -59, Tightening::LB ), Tightening( 21, 32, Tightening::UB ),
+            Tightening( 11, -35, Tightening::LB ), Tightening( 11, 7, Tightening::UB ),
         } );
+        List<Tightening> bounds, newBounds;
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-        // Invoke PMNR with BBPS heuristic for neuron selection
+        // Invoke backward LP propagation
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
 
-        List<Tightening> expectedBounds4( {} );
+        List<Tightening> expectedBounds2( {} );
 
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
+        bounds = removeRedundancies( newBounds );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+        List<List<Tightening>> infeasibleBranches( {} );
+        List<List<Tightening>> expectedInfeasibleBranches( {} );
+        TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
+        TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
     }
 
-    void test_pmnr_bbps_round()
+    void test_pmnr_invprop_abs_and_relu()
     {
         Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-bbps" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" );
 
         NLR::NetworkLevelReasoner nlr;
         MockTableau tableau;
         nlr.setTableau( &tableau );
-        populateNetworkBackwardRound( nlr, tableau );
+        populateNetworkWithAbsAndRelu( nlr, tableau );
 
-        tableau.setLowerBound( 0, 0 );
+        tableau.setLowerBound( 0, -1 );
         tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
+        tableau.setLowerBound( 1, -1 );
         tableau.setUpperBound( 1, 1 );
-
         // Invoke DeepPoly
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
 
         List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+            Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
+            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ),
+            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-            Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ),
-            Tightening( 7, -4, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
+            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ),
+            Tightening( 9, -5, Tightening::LB ), Tightening( 9, 7, Tightening::UB ),
-            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, -4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ),
+            Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
+            Tightening( 11, -5, Tightening::LB ), Tightening( 11, 7, Tightening::UB ),
-            Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ),
-            Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 6.5, Tightening::UB ),
+            Tightening( 12, -1, Tightening::LB ), Tightening( 12, 7, Tightening::UB ),
+            Tightening( 13, -14, Tightening::LB ), Tightening( 13, 26.25, Tightening::UB ),
         } );
-        List<Tightening> bounds;
+        List<Tightening> bounds, newBounds;
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
+        // Invoke Invprop
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
 
         List<Tightening> expectedBounds2( {
-            Tightening( 11, -4, Tightening::LB ),
-            Tightening( 11, 6, Tightening::UB ),
+            Tightening( 10, 0, Tightening::LB ),
+            Tightening( 11, 0, Tightening::LB ),
+
+            Tightening( 12, 0, Tightening::LB ),
+            Tightening( 13, 0, Tightening::LB ),
         } );
 
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
+        bounds = removeRedundancies( newBounds );
         TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+        List<List<Tightening>> infeasibleBranches( {} );
+        List<List<Tightening>> expectedInfeasibleBranches( {} );
+        TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
+        TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
+    }
 
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
+    void test_pmnr_invprop_round_and_sign()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" );
 
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkWithRoundAndSign( nlr, tableau );
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 1 );
 
         // Invoke DeepPoly
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
 
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
+        List<Tightening> expectedBounds( {
+            Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
+            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+            Tightening( 6, -5, Tightening::LB ), Tightening( 6, 5, Tightening::UB ),
+            Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-            Tightening( 6, -3, Tightening::LB ), Tightening( 6, 5, Tightening::UB ),
-            Tightening( 7, -10.5, Tightening::LB ), Tightening( 7, 5.5, Tightening::UB ),
+            Tightening( 8, -6, Tightening::LB ), Tightening( 8, 8, Tightening::UB ),
+            Tightening( 9, -5.5, Tightening::LB ), Tightening( 9, 7.5, Tightening::UB ),
-            Tightening( 8, -3, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, -10, Tightening::LB ), Tightening( 9, 6, Tightening::UB ),
+            Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ),
+            Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ),
-            Tightening( 10, -3, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
-            Tightening( 11, -15.5, Tightening::LB ), Tightening( 11, 11.5, Tightening::UB ),
+            Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ),
+            Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ),
         } );
+        List<Tightening> bounds, newBounds;
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-        // Invoke PMNR with BBPS heuristic for neuron selection
+        // Invoke Invprop
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
 
-        List<Tightening> expectedBounds4( {
-            Tightening( 7, -10, Tightening::LB ),
-            Tightening( 7, 5, Tightening::UB ),
+        List<Tightening> expectedBounds2( {
+            Tightening( 9, -4.75, Tightening::LB ),
+            Tightening( 9, 6.75, Tightening::UB ),
-            Tightening( 11, -14, Tightening::LB ),
-            Tightening( 11, 11, Tightening::UB ),
+            Tightening( 12, 1, Tightening::UB ),
+            Tightening( 13, 4, Tightening::UB ),
         } );
 
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
+        bounds = removeRedundancies( newBounds );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+        List<List<Tightening>> infeasibleBranches( {} );
+        List<List<Tightening>> expectedInfeasibleBranches( {} );
+        TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
+        TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
     }
 
-    void test_pmnr_bbps_round2()
+    void test_pmnr_invprop_leaky_relu_and_sigmoid()
     {
         Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
"backward-pmnr-bbps" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); - populateNetworkBackwardRound2( nlr, tableau ); + populateNetworkWithLeakyReluAndSigmoid( nlr, tableau ); tableau.setLowerBound( 0, -1 ); tableau.setUpperBound( 0, 1 ); tableau.setLowerBound( 1, -1 ); tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -11, Tightening::LB ), Tightening( 11, 15, Tightening::UB ), - Tightening( 12, -10, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), - Tightening( 13, -7, Tightening::LB ), Tightening( 13, 3, Tightening::UB ), + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 14, -11, Tightening::LB ), Tightening( 14, 15, Tightening::UB ), - Tightening( 15, -10, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), - Tightening( 16, -7, Tightening::LB ), Tightening( 16, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 6, -5, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 17, -27.5, Tightening::LB ), Tightening( 17, 27.5, Tightening::UB ), - Tightening( 18, -16.5, Tightening::LB ), Tightening( 18, 16.5, Tightening::UB ), + Tightening( 8, -6, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), + Tightening( 9, -4, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), - Tightening( 19, -28, Tightening::LB ), Tightening( 19, 28, Tightening::UB ), - Tightening( 20, -17, Tightening::LB ), Tightening( 20, 17, Tightening::UB ), + Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9997, Tightening::UB ), + Tightening( 11, 0.0180, Tightening::LB ), Tightening( 11, 0.9975, Tightening::UB ), - Tightening( 21, -38, Tightening::LB ), Tightening( 21, 36, Tightening::UB ), + Tightening( 12, 0.0025, Tightening::LB ), Tightening( 12, 0.9997, Tightening::UB ), + Tightening( 13, 0.0564, Tightening::LB ), Tightening( 13, 3.9922, Tightening::UB ), } ); - List bounds; + List bounds, newBounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Invoke PMNR with BBPS heuristic for neuron selection + // Invoke Invprop TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - List expectedBounds2( {} ); + List expectedBounds2( { + 
+            Tightening( 6, -0.5, Tightening::LB ),
+            Tightening( 7, -0.1, Tightening::LB ),
+            Tightening( 8, 7.1, Tightening::UB ),
+            Tightening( 8, -1.5, Tightening::LB ),
+            Tightening( 9, 5.1, Tightening::UB ),
+            Tightening( 9, -1.1, Tightening::LB ),
+            Tightening( 10, 0.0845, Tightening::LB ),
+            Tightening( 10, 0.9993, Tightening::UB ),
+            Tightening( 11, 0.2181, Tightening::LB ),
+            Tightening( 11, 0.9949, Tightening::UB ),
+            Tightening( 12, 0.0845, Tightening::LB ),
+            Tightening( 12, 0.9993, Tightening::UB ),
+            Tightening( 13, 0.7410, Tightening::LB ),
+            Tightening( 13, 3.9841, Tightening::UB ),
+        } );
 
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
+        bounds = removeRedundancies( newBounds );
         TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+        List<List<Tightening>> infeasibleBranches( {} );
+        List<List<Tightening>> expectedInfeasibleBranches( {} );
+        TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
+        TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
+    }
 
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
+    void test_pmnr_invprop_softmax_and_max()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" );
 
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkWithSoftmaxAndMax( nlr, tableau );
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 1 );
 
         // Invoke DeepPoly
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
 
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
-            Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-
-            Tightening( 11, -13, Tightening::LB ), Tightening( 11, 30, Tightening::UB ),
-            Tightening( 12, -23, Tightening::LB ), Tightening( 12, 12, Tightening::UB ),
-            Tightening( 13, -9, Tightening::LB ), Tightening( 13, 6, Tightening::UB ),
+        List<Tightening> expectedBounds( {
+            Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
+            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 14, -13, Tightening::LB ), Tightening( 14, 30, Tightening::UB ),
-            Tightening( 15, -23, Tightening::LB ), Tightening( 15, 12, Tightening::UB ),
-            Tightening( 16, -9, Tightening::LB ), Tightening( 16, 6, Tightening::UB ),
+            Tightening( 5, 0.0066, Tightening::LB ), Tightening( 5, 0.9517, Tightening::UB ),
+            Tightening( 6, 0.0007, Tightening::LB ), Tightening( 6, 0.9909, Tightening::UB ),
+            Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ),
-            Tightening( 17, -57.5, Tightening::LB ), Tightening( 17, 32.5, Tightening::UB ),
-            Tightening( 18, -23.5, Tightening::LB ), Tightening( 18, 26.5, Tightening::UB ),
+            Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ),
+            Tightening( 9, 0.3192, Tightening::LB ), Tightening( 9, 2.9819, Tightening::UB ),
-            Tightening( 19, -58, Tightening::LB ), Tightening( 19, 33, Tightening::UB ),
-            Tightening( 20, -24, Tightening::LB ), Tightening( 20, 27, Tightening::UB ),
+            Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ),
-            Tightening( 21, -74, Tightening::LB ), Tightening( 21, 44, Tightening::UB ),
+            Tightening( 11, -2.9819, Tightening::LB ), Tightening( 11, -0.3192, Tightening::UB ),
         } );
+        List<Tightening> bounds, newBounds;
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-        // Invoke PMNR with BBPS heuristic for neuron selection
+        // Invoke Invprop
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
 
-        List<Tightening> expectedBounds4( {} );
+        List<Tightening> expectedBounds2( {
+            Tightening( 8, -0.6812, Tightening::LB ),
+            Tightening( 8, 1.8414, Tightening::UB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
+        bounds = removeRedundancies( newBounds );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
 
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+        List<List<Tightening>> infeasibleBranches( {} );
+        List<List<Tightening>> expectedInfeasibleBranches( {} );
+        TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
+        TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
     }
 
-    void test_pmnr_bbps_sign()
+    void test_pmnr_invprop_relu_and_bilinear()
     {
         Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-pmnr-bbps" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" ); NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); - populateNetworkBackwardSign( nlr, tableau ); + populateNetworkWithReluAndBilinear( nlr, tableau ); - tableau.setLowerBound( 0, 0 ); + tableau.setLowerBound( 0, -1 ); tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); + tableau.setLowerBound( 1, -1 ); tableau.setUpperBound( 1, 1 ); - // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -7, Tightening::LB ), Tightening( 10, 35, Tightening::UB ), - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + Tightening( 11, -35, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), } ); - List bounds; + List bounds, newBounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Invoke PMNR with BBPS heuristic for neuron selection + // Invoke Invprop TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds2( {} ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + List> infeasibleBranches( {} ); + List> expectedInfeasibleBranches( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) ); + TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) ); + } - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + void test_pmnr_random_abs_and_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-random" ); - 
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkWithAbsAndRelu( nlr, tableau );
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 1 );
 
         // Invoke DeepPoly
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
 
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
+        List<Tightening> expectedBounds( {
+            Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
+            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ),
+            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-            Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ),
+            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ),
+            Tightening( 9, -5, Tightening::LB ), Tightening( 9, 7, Tightening::UB ),
-            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
+            Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
+            Tightening( 11, -5, Tightening::LB ), Tightening( 11, 7, Tightening::UB ),
-            Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ),
-            Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ),
+            Tightening( 12, -1, Tightening::LB ), Tightening( 12, 7, Tightening::UB ),
+            Tightening( 13, -14, Tightening::LB ), Tightening( 13, 26.25, Tightening::UB ),
         } );
+        List<Tightening> bounds, newBounds;
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-        // Invoke PMNR with BBPS heuristic for neuron selection
+        // Invoke PMNR with random neuron selection
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
 
-        List<Tightening> expectedBounds4( {} );
+        List<Tightening> expectedBounds2( {
+            Tightening( 10, 0, Tightening::LB ),
+            Tightening( 11, 0, Tightening::LB ),
 
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+            Tightening( 12, 0, Tightening::LB ),
+            Tightening( 13, 0, Tightening::LB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
+        bounds = removeRedundancies( newBounds );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+        List<List<Tightening>> infeasibleBranches( {} );
+        List<List<Tightening>> expectedInfeasibleBranches( {} );
+        TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
+        TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
     }
 
-    void test_pmnr_bbps_sign2()
+    void test_pmnr_random_round_and_sign()
    {
         Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
         Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-bbps" );
+                                   "backward-pmnr-random" );
 
         NLR::NetworkLevelReasoner nlr;
         MockTableau tableau;
         nlr.setTableau( &tableau );
-        populateNetworkBackwardSign2( nlr, tableau );
+        populateNetworkWithRoundAndSign( nlr, tableau );
 
         tableau.setLowerBound( 0, -1 );
         tableau.setUpperBound( 0, 1 );
         tableau.setLowerBound( 1, -1 );
         tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
         // Invoke DeepPoly
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
 
         List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-            Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ),
-
-            Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ),
-            Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ),
-            Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ),
+            Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
+            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ),
-            Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ),
-            Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+            Tightening( 6, -5, Tightening::LB ), Tightening( 6, 5, Tightening::UB ),
+            Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-            Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ),
-            Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ),
+            Tightening( 8, -6, Tightening::LB ), Tightening( 8, 8, Tightening::UB ),
+            Tightening( 9, -5.5, Tightening::LB ), Tightening( 9, 7.5, Tightening::UB ),
-            Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ),
-            Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ),
+            Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ),
+            Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ),
-            Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ),
+            Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ),
+            Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ),
         } );
-        List<Tightening> bounds;
+        List<Tightening> bounds, newBounds;
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
+        // Invoke PMNR with random neuron selection
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
 
-        List<Tightening> expectedBounds2( {} );
+        List<Tightening> expectedBounds2( {
+            Tightening( 9, -4.75, Tightening::LB ),
+            Tightening( 9, 6.75, Tightening::UB ),
+        } );
 
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
+        bounds = removeRedundancies( newBounds );
         TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+        List<List<Tightening>> infeasibleBranches( {} );
+        List<List<Tightening>> expectedInfeasibleBranches( {} );
+        TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
+        TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
+    }
 
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
+    void test_pmnr_random_leaky_relu_and_sigmoid()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+                                   "backward-pmnr-random" );
 
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkWithLeakyReluAndSigmoid( nlr, tableau );
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 1 );
 
         // Invoke DeepPoly
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
 
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
-            Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ),
-
-            Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ),
-            Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ),
-            Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ),
+        List<Tightening> expectedBounds( {
+            Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
+            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ),
-            Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ),
-            Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+            Tightening( 6, -5, Tightening::LB ), Tightening( 6, 5, Tightening::UB ),
+            Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-            Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ),
-            Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ),
+            Tightening( 8, -6, Tightening::LB ), Tightening( 8, 8, Tightening::UB ),
+            Tightening( 9, -4, Tightening::LB ), Tightening( 9, 6, Tightening::UB ),
-            Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ),
-            Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ),
+            Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9997, Tightening::UB ),
+            Tightening( 11, 0.0180, Tightening::LB ), Tightening( 11, 0.9975, Tightening::UB ),
-            Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ),
+            Tightening( 12, 0.0025, Tightening::LB ), Tightening( 12, 0.9997, Tightening::UB ),
+            Tightening( 13, 0.0564, Tightening::LB ), Tightening( 13, 3.9922, Tightening::UB ),
         } );
+        List<Tightening> bounds, newBounds;
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-        // Invoke PMNR with BBPS heuristic for neuron selection
+        // Invoke PMNR with random neuron selection
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
 
-        List<Tightening> expectedBounds4( {} );
+        List<Tightening> expectedBounds2( {
+            Tightening( 6, -0.5, Tightening::LB ),
+            Tightening( 7, -0.1, Tightening::LB ),
+            Tightening( 8, -1.5, Tightening::LB ),
+            Tightening( 8, 7.1, Tightening::UB ),
+            Tightening( 9, -1.1, Tightening::LB ),
+            Tightening( 9, 5.1, Tightening::UB ),
+            Tightening( 10, 0.0266, Tightening::LB ),
+            Tightening( 10, 0.9995, Tightening::UB ),
+            Tightening( 11, 0.1679, Tightening::LB ),
+            Tightening( 11, 0.9960, Tightening::UB ),
+            Tightening( 12, 0.0266, Tightening::LB ),
+            Tightening( 12, 0.9995, Tightening::UB ),
+            Tightening( 13, 0.5302, Tightening::LB ),
+            Tightening( 13, 3.9875, Tightening::UB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
+        bounds = removeRedundancies( newBounds );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
 
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+        List<List<Tightening>> infeasibleBranches( {} );
+        List<List<Tightening>> expectedInfeasibleBranches( {} );
+        TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
+        TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
     }
 
-    void test_pmnr_bbps_leaky_relu()
+    void test_pmnr_random_softmax_and_max()
     {
         Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
         Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-bbps" );
+                                   "backward-pmnr-random" );
 
         NLR::NetworkLevelReasoner nlr;
         MockTableau tableau;
         nlr.setTableau( &tableau );
-        populateNetworkBackwardLeakyReLU( nlr, tableau );
+        populateNetworkWithSoftmaxAndMax( nlr, tableau );
 
-        tableau.setLowerBound( 0, 0 );
+        tableau.setLowerBound( 0, -1 );
         tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, 0 );
+        tableau.setLowerBound( 1, -1 );
         tableau.setUpperBound( 1, 1 );
-
         // Invoke DeepPoly
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
 
         List<Tightening> expectedBounds( {
-            Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ),
-            Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+            Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
+            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+            Tightening( 5, 0.0066, Tightening::LB ), Tightening( 5, 0.9517, Tightening::UB ),
+            Tightening( 6, 0.0007, Tightening::LB ), Tightening( 6, 0.9909, Tightening::UB ),
+            Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ),
-            Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
-            Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
+            Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ),
+            Tightening( 9, 0.3192, Tightening::LB ), Tightening( 9, 2.9819, Tightening::UB ),
-            Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ),
-            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ),
+            Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ),
-            Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ),
-            Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ),
+            Tightening( 11, -2.9819, Tightening::LB ), Tightening( 11, -0.3192, Tightening::UB ),
         } );
-        List<Tightening> bounds;
+        List<Tightening> bounds, newBounds;
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
+        // Invoke PMNR with random neuron selection
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
         List<Tightening> expectedBounds2( {
-            Tightening( 4, -0.1, Tightening::LB ),
-
-            Tightening( 8, -0.045, Tightening::LB ),
-            Tightening( 9, -0.3, Tightening::LB ),
+            Tightening( 8, -0.6812, Tightening::LB ),
+            Tightening( 8, 1.8409, Tightening::UB ),
         } );
 
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
+        bounds = removeRedundancies( newBounds );
         TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+        List<List<Tightening>> infeasibleBranches( {} );
+        List<List<Tightening>> expectedInfeasibleBranches( {} );
+        TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
+        TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
+    }
 
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
+    void test_pmnr_random_relu_and_bilinear()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+                                   "backward-pmnr-random" );
 
-        double large = 1000000;
-        tableau.setLowerBound( 2, -large );
-        tableau.setUpperBound( 2, large );
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkWithReluAndBilinear( nlr, tableau );
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 1 );
 
         // Invoke DeepPoly
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
 
-        List<Tightening> expectedBounds3( {
-            Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ),
+        List<Tightening> expectedBounds( {
+            Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
+            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
-            Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ),
+            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-            Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ),
-            Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ),
+            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ),
+            Tightening( 9, -1, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
-            Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ),
-            Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ),
+            Tightening( 10, -7, Tightening::LB ), Tightening( 10, 35, Tightening::UB ),
-            Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ),
-            Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ),
+            Tightening( 11, -35, Tightening::LB ), Tightening( 11, 7, Tightening::UB ),
         } );
+        List<Tightening> bounds, newBounds;
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
+        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-        // Invoke PMNR with BBPS heuristic for neuron selection
+        // Invoke PMNR with random neuron selection
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
 
-        List<Tightening> expectedBounds4( {
-            Tightening( 4, -0.5, Tightening::LB ),
-            Tightening( 5, -0.4, Tightening::LB ),
+        List<Tightening> expectedBounds2( {} );
-            Tightening( 8, -0.4571, Tightening::LB ),
-            Tightening( 9, -1.1057, Tightening::LB ),
-        } );
 
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
+        bounds = removeRedundancies( newBounds );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
 
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+        List<List<Tightening>> infeasibleBranches( {} );
+        List<List<Tightening>> expectedInfeasibleBranches( {} );
+        TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
+        TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
     }
 
-    void test_pmnr_bbps_leaky_relu2()
+    void test_pmnr_gradient_abs_and_relu()
     {
         Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
         Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-bbps" );
+                                   "backward-pmnr-gradient" );
 
         NLR::NetworkLevelReasoner nlr;
         MockTableau tableau;
         nlr.setTableau( &tableau );
-        populateNetworkBackwardLeakyRelu2( nlr, tableau );
+        populateNetworkWithAbsAndRelu( nlr, tableau );
 
         tableau.setLowerBound( 0, -1 );
         tableau.setUpperBound( 0, 1 );
         tableau.setLowerBound( 1, -1 );
         tableau.setUpperBound( 1, 1 );
-        tableau.setLowerBound( 2, -1 );
-        tableau.setUpperBound( 2, 1 );
-
         // Invoke DeepPoly
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
 
         List<Tightening> expectedBounds( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-            Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
-            Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
-            Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
-            Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
-            Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
+            Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
+            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ),
-            Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ),
-            Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ),
-
-            Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ),
-            Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ),
-            Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+            Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ),
+            Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-            Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ),
-            Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ),
+            Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ),
+            Tightening( 9, -5, Tightening::LB ), Tightening( 9, 7, Tightening::UB ),
-            Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ),
-            Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ),
+            Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
+            Tightening( 11, -5, Tightening::LB ), Tightening( 11, 7, Tightening::UB ),
-            Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ),
+            Tightening( 12, -1, Tightening::LB ), Tightening( 12, 7, Tightening::UB ),
+            Tightening( 13, -14, Tightening::LB ), Tightening( 13, 26.25, Tightening::UB ),
         } );
-        List<Tightening> bounds;
+        List<Tightening> bounds, newBounds;
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
+        // Invoke PMNR with gradient-based heuristic for neuron selection
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
 
         List<Tightening> expectedBounds2( {
-            Tightening( 7, -0.2, Tightening::LB ),
-            Tightening( 8, -0.3, Tightening::LB ),
-            Tightening( 9, -0.3, Tightening::LB ),
-            Tightening( 10, -0.6, Tightening::LB ),
-
-            Tightening( 14, -0.9, Tightening::LB ),
-            Tightening( 15, -0.89, Tightening::LB ),
-            Tightening( 16, -0.77, Tightening::LB ),
+            Tightening( 10, 0, Tightening::LB ),
+            Tightening( 11, 0, Tightening::LB ),
-            Tightening( 19, -2.3133, Tightening::LB ),
-            Tightening( 20, -1.2, Tightening::LB ),
+            Tightening( 12, 0, Tightening::LB ),
+            Tightening( 13, 0, Tightening::LB ),
         } );
 
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
+        bounds = removeRedundancies( newBounds );
         TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+        List<List<Tightening>> infeasibleBranches( {} );
+        List<List<Tightening>> expectedInfeasibleBranches( {} );
+        TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
+        TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
+    }
 
-        // Change the current bounds
-        tableau.setLowerBound( 0, -3 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 2 );
-        tableau.setLowerBound( 2, -2 );
-        tableau.setUpperBound( 2, 2 );
+    void test_pmnr_gradient_round_and_sign()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+                                   "backward-pmnr-gradient" );
 
-        double large = 1000000;
-        tableau.setLowerBound( 3, -large );
-        tableau.setUpperBound( 3, large );
-        tableau.setLowerBound( 4, -large );
-        tableau.setUpperBound( 4, large );
-        tableau.setLowerBound( 5, -large );
-        tableau.setUpperBound( 5, large );
-        tableau.setLowerBound( 6, -large );
-        tableau.setUpperBound( 6, large );
-        tableau.setLowerBound( 7, -large );
-        tableau.setUpperBound( 7, large );
-        tableau.setLowerBound( 8, -large );
-        tableau.setUpperBound( 8, large );
-        tableau.setLowerBound( 9, -large );
-        tableau.setUpperBound( 9, large );
-        tableau.setLowerBound( 10, -large );
-        tableau.setUpperBound( 10, large );
-        tableau.setLowerBound( 11, -large );
-        tableau.setUpperBound( 11, large );
-        tableau.setLowerBound( 12, -large );
-        tableau.setUpperBound( 12, large );
-        tableau.setLowerBound( 13, -large );
-        tableau.setUpperBound( 13, large );
-        tableau.setLowerBound( 14, -large );
-        tableau.setUpperBound( 14, large );
-        tableau.setLowerBound( 15, -large );
-        tableau.setUpperBound( 15, large );
-        tableau.setLowerBound( 16, -large );
-        tableau.setUpperBound( 16, large );
-        tableau.setLowerBound( 17, -large );
-        tableau.setUpperBound( 17, large );
-        tableau.setLowerBound( 18, -large );
-        tableau.setUpperBound( 18, large );
-        tableau.setLowerBound( 19, -large );
-        tableau.setUpperBound( 19, large );
-        tableau.setLowerBound( 20, -large );
-        tableau.setUpperBound( 20, large );
-        tableau.setLowerBound( 21, -large );
-        tableau.setUpperBound( 21, large );
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkWithRoundAndSign( nlr, tableau );
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 1 );
 
         // Invoke DeepPoly
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
 
-        List<Tightening> expectedBounds3( {
-            Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
-            Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
-            Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
-
-            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
-            Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
-            Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-
-            Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ),
-            Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ),
-            Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ),
+        List<Tightening> expectedBounds( {
+            Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+            Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
+            Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
-            Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ),
-            Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ),
-            Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+            Tightening( 6, -5, Tightening::LB ), Tightening( 6, 5, Tightening::UB ),
+            Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
-            Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ),
-            Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ),
+            Tightening( 8, -6, Tightening::LB ), Tightening( 8, 8, Tightening::UB ),
+            Tightening( 9, -5.5, Tightening::LB ), Tightening( 9, 7.5, Tightening::UB ),
Tightening::LB ), Tightening( 9, 7.5, Tightening::UB ), - Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ), - Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), - Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ), + Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), } ); + List bounds, newBounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // Invoke PMNR with BBPS heuristic for neuron selection + // Invoke PMNR with gradient-based heuristic for neuron selection TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - List expectedBounds4( { - Tightening( 7, -0.2, Tightening::LB ), - Tightening( 8, -0.5, Tightening::LB ), - Tightening( 9, -0.6, Tightening::LB ), - Tightening( 10, -1.5, Tightening::LB ), + List expectedBounds2( + { Tightening( 9, -4.75, Tightening::LB ), Tightening( 9, 6.75, Tightening::UB ) } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + List> infeasibleBranches( {} ); + List> expectedInfeasibleBranches( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) ); + TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) ); + } + + void test_pmnr_gradient_leaky_relu_and_sigmoid() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkWithLeakyReluAndSigmoid( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 6, -5, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -6, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), + Tightening( 9, -4, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), - Tightening( 14, -1.1, Tightening::LB ), - Tightening( 15, -2.1771, Tightening::LB ), - Tightening( 16, -1.15, Tightening::LB ), + Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9997, Tightening::UB ), + Tightening( 11, 0.0180, Tightening::LB ), Tightening( 11, 0.9975, Tightening::UB ), - Tightening( 19, -5.6259, Tightening::LB ), - Tightening( 20, -1.9, Tightening::LB ), + Tightening( 
12, 0.0025, Tightening::LB ), Tightening( 12, 0.9997, Tightening::UB ), + Tightening( 13, 0.0564, Tightening::LB ), Tightening( 13, 3.9922, Tightening::UB ), } ); + List bounds, newBounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Invoke PMNR with gradient-based heuristic for neuron selection + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 6, -0.5, Tightening::LB ), + Tightening( 7, -0.1, Tightening::LB ), + Tightening( 8, -1.5, Tightening::LB ), + Tightening( 8, 7.1, Tightening::UB ), + Tightening( 9, -1.1, Tightening::LB ), + Tightening( 9, 5.1, Tightening::UB ), + Tightening( 10, 0.0230, Tightening::LB ), + Tightening( 10, 0.9995, Tightening::UB ), + Tightening( 11, 0.1483, Tightening::LB ), + Tightening( 11, 0.9961, Tightening::UB ), + Tightening( 12, 0.0230, Tightening::LB ), + Tightening( 12, 0.9995, Tightening::UB ), + Tightening( 13, 0.4680, Tightening::LB ), + Tightening( 13, 3.9879, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + List> infeasibleBranches( {} ); + List> expectedInfeasibleBranches( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) ); + TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) ); } - void test_pmnr_bbps_softmax_and_max() + void test_pmnr_gradient_softmax_and_max() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-pmnr-bbps" ); + "backward-pmnr-gradient" ); NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); - populateNetworkBackwardSoftmaxAndMax( nlr, tableau ); + populateNetworkWithSoftmaxAndMax( nlr, tableau ); - tableau.setLowerBound( 0, 0 ); + tableau.setLowerBound( 0, -1 ); tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); + tableau.setLowerBound( 1, -1 ); tableau.setUpperBound( 1, 1 ); - - // Invoke SBT + // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); List expectedBounds( { - Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 3, 0.2119, Tightening::LB ), Tightening( 3, 0.8756, Tightening::UB ), - Tightening( 5, 0.0049, Tightening::LB ), Tightening( 5, 0.6652, Tightening::UB ), - Tightening( 7, 0.0634, Tightening::LB ), Tightening( 7, 0.4955, Tightening::UB ), + Tightening( 5, 0.0066, Tightening::LB ), Tightening( 5, 0.9517, Tightening::UB ), + Tightening( 6, 0.0007, Tightening::LB ), Tightening( 6, 0.9909, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ), - Tightening( 8, 
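Editor's note: the sigmoid constants asserted above (0.0025, 0.9997, 0.0180, 0.9975, ...) are just the sigmoid evaluated at the concretized pre-activation endpoints; sigmoid is monotonically increasing, so interval endpoints map through directly. A minimal standalone sketch of that arithmetic (an illustration under that assumption, not part of the patch or the NLR's actual sigmoid-layer code):

#include <cmath>
#include <cstdio>

// Monotone activations map interval endpoints directly:
// sigmoid(lb) <= sigmoid(x) <= sigmoid(ub) for x in [lb, ub].
static double sigmoid( double x )
{
    return 1.0 / ( 1.0 + std::exp( -x ) );
}

int main()
{
    double lb = -6, ub = 8; // x8's DeepPoly bounds in the test above
    std::printf( "x10 in [%.4f, %.4f]\n", sigmoid( lb ), sigmoid( ub ) );
    // Prints x10 in [0.0025, 0.9997], matching Tightening( 10, ... ) above.
    return 0;
}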
- void test_pmnr_bbps_softmax_and_max()
+ void test_pmnr_gradient_softmax_and_max()
{
Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
- Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" );
Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
- "backward-pmnr-bbps" );
+ "backward-pmnr-gradient" );

NLR::NetworkLevelReasoner nlr;
MockTableau tableau;
nlr.setTableau( &tableau );
- populateNetworkBackwardSoftmaxAndMax( nlr, tableau );
+ populateNetworkWithSoftmaxAndMax( nlr, tableau );

- tableau.setLowerBound( 0, 0 );
+ tableau.setLowerBound( 0, -1 );
tableau.setUpperBound( 0, 1 );
- tableau.setLowerBound( 1, 0 );
+ tableau.setLowerBound( 1, -1 );
tableau.setUpperBound( 1, 1 );
-
- // Invoke SBT
+ // Invoke DeepPoly
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );

List<Tightening> expectedBounds( {
- Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
- Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
- Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
+ Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+ Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
+ Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
- Tightening( 3, 0.2119, Tightening::LB ), Tightening( 3, 0.8756, Tightening::UB ),
- Tightening( 5, 0.0049, Tightening::LB ), Tightening( 5, 0.6652, Tightening::UB ),
- Tightening( 7, 0.0634, Tightening::LB ), Tightening( 7, 0.4955, Tightening::UB ),
+ Tightening( 5, 0.0066, Tightening::LB ), Tightening( 5, 0.9517, Tightening::UB ),
+ Tightening( 6, 0.0007, Tightening::LB ), Tightening( 6, 0.9909, Tightening::UB ),
+ Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ),
- Tightening( 8, -0.1870, Tightening::LB ), Tightening( 8, 1.4488, Tightening::UB ),
- Tightening( 9, 0.6814, Tightening::LB ), Tightening( 9, 2.2432, Tightening::UB ),
+ Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ),
+ Tightening( 9, 0.3192, Tightening::LB ), Tightening( 9, 2.9819, Tightening::UB ),
- Tightening( 10, 0.6814, Tightening::LB ), Tightening( 10, 2.2432, Tightening::UB ),
+ Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ),
- Tightening( 11, -2.2432, Tightening::LB ), Tightening( 11, -0.6814, Tightening::UB ),
+ Tightening( 11, -2.9819, Tightening::LB ), Tightening( 11, -0.3192, Tightening::UB ),
} );

- List<Tightening> bounds;
+ List<Tightening> bounds, newBounds;
TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
- // Invoke PMNR with BBPS heuristic for neuron selection
+ // Invoke PMNR with gradient-based heuristic for neuron selection
TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );

- List<Tightening> expectedBounds2( {} );
+ List<Tightening> expectedBounds2( {
+ Tightening( 8, -0.6812, Tightening::LB ),
+ Tightening( 8, 1.8414, Tightening::UB ),
+ } );

- TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
+ bounds = removeRedundancies( newBounds );
TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );

+ List<Map<NLR::NeuronIndex, unsigned>> infeasibleBranches( {} );
+ List<Map<NLR::NeuronIndex, unsigned>> expectedInfeasibleBranches( {} );
+ TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
+ TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
+ }
+
+ void test_pmnr_gradient_relu_and_bilinear()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+ Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+ "backward-pmnr-gradient" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkWithReluAndBilinear( nlr, tableau );

- // Change the current bounds
- tableau.setLowerBound( 0, -3 );
+ tableau.setLowerBound( 0, -1 );
tableau.setUpperBound( 0, 1 );
tableau.setLowerBound( 1, -1 );
- tableau.setUpperBound( 1, 2 );
-
- double large = 1000000;
- tableau.setLowerBound( 2, -large );
- tableau.setUpperBound( 2, large );
- tableau.setLowerBound( 3, -large );
- tableau.setUpperBound( 3, large );
- tableau.setLowerBound( 4, -large );
- tableau.setUpperBound( 4, large );
- tableau.setLowerBound( 5, -large );
- tableau.setUpperBound( 5, large );
- tableau.setLowerBound( 6, -large );
- tableau.setUpperBound( 6, large );
- tableau.setLowerBound( 7, -large );
- tableau.setUpperBound( 7, large );
- tableau.setLowerBound( 8, -large );
- tableau.setUpperBound( 8, large );
- tableau.setLowerBound( 9, -large );
- tableau.setUpperBound( 9, large );
- tableau.setLowerBound( 10, -large );
- tableau.setUpperBound( 10, large );
- tableau.setLowerBound( 11, -large );
- tableau.setUpperBound( 11, large );
+ tableau.setUpperBound( 1, 1 );

// Invoke DeepPoly
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );

- List<Tightening> expectedBounds3( {
- Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
- Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
- Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
+ List<Tightening> expectedBounds( {
+ Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+ Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
+ Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
- Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ),
- Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ),
- Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ),
+ Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+ Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ),
+ Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
- Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ),
- Tightening( 9, 0.2194, Tightening::LB ), Tightening( 9, 2.9934, Tightening::UB ),
+ Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ),
+ Tightening( 9, -1, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
- Tightening( 10, 0.2194, Tightening::LB ), Tightening( 10, 2.9934, Tightening::UB ),
+ Tightening( 10, -7, Tightening::LB ), Tightening( 10, 35, Tightening::UB ),
- Tightening( 11, -2.9934, Tightening::LB ), Tightening( 11, -0.2194, Tightening::UB ),
+ Tightening( 11, -35, Tightening::LB ), Tightening( 11, 7, Tightening::UB ),
} );

+ List<Tightening> bounds, newBounds;
TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
- TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
- // Invoke PMNR with BBPS heuristic for neuron selection
+ // Invoke PMNR with gradient-based heuristic for neuron selection
TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );

- List<Tightening> expectedBounds4( {} );
+ List<Tightening> expectedBounds2( {} );
+
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
+ bounds = removeRedundancies( newBounds );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );

- TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
- TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+ List<Map<NLR::NeuronIndex, unsigned>> infeasibleBranches( {} );
+ List<Map<NLR::NeuronIndex, unsigned>> expectedInfeasibleBranches( {} );
+ TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
+ TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
+ }
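Editor's note: the bilinear bounds asserted above (x10 in [-7, 35]) are the classic interval product of its two inputs, x8 in [-1, 7] and x9 in [-1, 5]: the extremes of x8 * x9 are attained at the four corner products. A minimal sketch of that computation (an illustration only; the real bounds come from the NLR's bilinear layer):

#include <algorithm>
#include <cstdio>

int main()
{
    double l1 = -1, u1 = 7; // x8's bounds in the test above
    double l2 = -1, u2 = 5; // x9's bounds in the test above
    // Evaluate the four corner products and take min/max.
    double c[] = { l1 * l2, l1 * u2, u1 * l2, u1 * u2 };
    std::printf( "x10 in [%g, %g]\n",
                 *std::min_element( c, c + 4 ),
                 *std::max_element( c, c + 4 ) );
    // Prints x10 in [-7, 35], matching Tightening( 10, ... ) above.
    return 0;
}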
- void test_pmnr_bbps_softmax_and_max2()
+ void test_pmnr_bbps_abs_and_relu()
{
Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
- Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" );
Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
"backward-pmnr-bbps" );

NLR::NetworkLevelReasoner nlr;
MockTableau tableau;
nlr.setTableau( &tableau );
- populateNetworkBackwardSoftmaxAndMax2( nlr, tableau );
+ populateNetworkWithAbsAndRelu( nlr, tableau );

tableau.setLowerBound( 0, -1 );
tableau.setUpperBound( 0, 1 );
tableau.setLowerBound( 1, -1 );
tableau.setUpperBound( 1, 1 );
- tableau.setLowerBound( 2, -1 );
- tableau.setUpperBound( 2, 1 );
-
// Invoke DeepPoly
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );

List<Tightening> expectedBounds( {
- Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
- Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
- Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
- Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
+ Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+ Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
+ Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
- Tightening( 7, 0.0003, Tightening::LB ), Tightening( 7, 0.9864, Tightening::UB ),
- Tightening( 8, 0.0001, Tightening::LB ), Tightening( 8, 0.9907, Tightening::UB ),
- Tightening( 9, 0.0001, Tightening::LB ), Tightening( 9, 0.9907, Tightening::UB ),
- Tightening( 10, 0.0001, Tightening::LB ), Tightening( 10, 0.9994, Tightening::UB ),
+ Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+ Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ),
+ Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
- Tightening( 11, 1.0013, Tightening::LB ), Tightening( 11, 4.9634, Tightening::UB ),
- Tightening( 12, -0.9861, Tightening::LB ), Tightening( 12, 2.9152, Tightening::UB ),
- Tightening( 13, -2.9868, Tightening::LB ), Tightening( 13, 0.9678, Tightening::UB ),
+ Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ),
+ Tightening( 9, -5, Tightening::LB ), Tightening( 9, 7, Tightening::UB ),
- Tightening( 14, 0.1143, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ),
- Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8694, Tightening::UB ),
- Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4596, Tightening::UB ),
+ Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
+ Tightening( 11, -5, Tightening::LB ), Tightening( 11, 7, Tightening::UB ),
- Tightening( 17, 0.1143, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ),
+ Tightening( 12, -1, Tightening::LB ), Tightening( 12, 7, Tightening::UB ),
+ Tightening( 13, -14, Tightening::LB ), Tightening( 13, 26.25, Tightening::UB ),
} );

- List<Tightening> bounds;
+ List<Tightening> bounds, newBounds;
TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
- // Invoke PMNR with BBPS heuristic for neuron selection
+ // Invoke PMNR with BBPS-based heuristic for neuron selection
TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );

- List<Tightening> expectedBounds2( {} );
+ List<Tightening> expectedBounds2( {
+ Tightening( 10, 0, Tightening::LB ),
+ Tightening( 11, 0, Tightening::LB ),
- TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ Tightening( 12, 0, Tightening::LB ),
+ Tightening( 13, 0, Tightening::LB ),
+ Tightening( 13, 26, Tightening::UB ),
+ } );
+
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
+ bounds = removeRedundancies( newBounds );
TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );

+ List<Map<NLR::NeuronIndex, unsigned>> infeasibleBranches( {} );
+ List<Map<NLR::NeuronIndex, unsigned>> expectedInfeasibleBranches( {} );
+ TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
+ TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
+ }

- // Change the current bounds
- tableau.setLowerBound( 0, -3 );
- tableau.setUpperBound( 0, 1 );
- tableau.setLowerBound( 1, -1 );
- tableau.setUpperBound( 1, 2 );
- tableau.setLowerBound( 2, -2 );
- tableau.setUpperBound( 2, 2 );
+ void test_pmnr_bbps_round_and_sign()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+ Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+ "backward-pmnr-bbps" );

- double large = 1000000;
- tableau.setLowerBound( 3, -large );
- tableau.setUpperBound( 3, large );
- tableau.setLowerBound( 4, -large );
- tableau.setUpperBound( 4, large );
- tableau.setLowerBound( 5, -large );
- tableau.setUpperBound( 5, large );
- tableau.setLowerBound( 6, -large );
- tableau.setUpperBound( 6, large );
- tableau.setLowerBound( 7, -large );
- tableau.setUpperBound( 7, large );
- tableau.setLowerBound( 8, -large );
- tableau.setUpperBound( 8, large );
- tableau.setLowerBound( 9, -large );
- tableau.setUpperBound( 9, large );
- tableau.setLowerBound( 10, -large );
- tableau.setUpperBound( 10, large );
- tableau.setLowerBound( 11, -large );
- tableau.setUpperBound( 11, large );
- tableau.setLowerBound( 12, -large );
- tableau.setUpperBound( 12, large );
- tableau.setLowerBound( 13, -large );
- tableau.setUpperBound( 13, large );
- tableau.setLowerBound( 14, -large );
- tableau.setUpperBound( 14, large );
- tableau.setLowerBound( 15, -large );
- tableau.setUpperBound( 15, large );
- tableau.setLowerBound( 16, -large );
- tableau.setUpperBound( 16, large );
- tableau.setLowerBound( 17, -large );
- tableau.setUpperBound( 17, large );
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkWithRoundAndSign( nlr, tableau );
+ tableau.setLowerBound( 0, -1 );
+ tableau.setUpperBound( 0, 1 );
+ tableau.setLowerBound( 1, -1 );
+ tableau.setUpperBound( 1, 1 );

// Invoke DeepPoly
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );

- List<Tightening> expectedBounds3( {
- Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
- Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
- Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
- Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
+ List<Tightening> expectedBounds( {
+ Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+ Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
+ Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
- Tightening( 7, 0.0001, Tightening::LB ), Tightening( 7, 0.9999, Tightening::UB ),
- Tightening( 8, 0, Tightening::LB ), Tightening( 8, 0.9991, Tightening::UB ),
- Tightening( 9, 0, Tightening::LB ), Tightening( 9, 0.9990, Tightening::UB ),
- Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9999, Tightening::UB ),
+ Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+ Tightening( 6, -5, Tightening::LB ), Tightening( 6, 5, Tightening::UB ),
+ Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
- Tightening( 11, 1.0003, Tightening::LB ), Tightening( 11, 4.9326, Tightening::UB ),
- Tightening( 12, -0.9999, Tightening::LB ), Tightening( 12, 2.9979, Tightening::UB ),
- Tightening( 13, -2.9990, Tightening::LB ), Tightening( 13, 0.9980, Tightening::UB ),
+ Tightening( 8, -6, Tightening::LB ), Tightening( 8, 8, Tightening::UB ),
+ Tightening( 9, -5.5, Tightening::LB ), Tightening( 9, 7.5, Tightening::UB ),
- Tightening( 14, 0.1067, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ),
- Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8786, Tightening::UB ),
- Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4677, Tightening::UB ),
+ Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ),
+ Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ),
- Tightening( 17, 0.1067, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ),
+ Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ),
+ Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ),
} );

+ List<Tightening> bounds, newBounds;
TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
- TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
- // Invoke PMNR with BBPS heuristic for neuron selection
+ // Invoke PMNR with BBPS-based heuristic for neuron selection
TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );

- List<Tightening> expectedBounds4( {} );
+ List<Tightening> expectedBounds2(
+ { Tightening( 9, -4.75, Tightening::LB ), Tightening( 9, 6.75, Tightening::UB ) } );

- TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
- TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
+ bounds = removeRedundancies( newBounds );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+ List<Map<NLR::NeuronIndex, unsigned>> infeasibleBranches( {} );
+ List<Map<NLR::NeuronIndex, unsigned>> expectedInfeasibleBranches( {} );
+ TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
+ TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
+ }
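Editor's note: the Round and Sign bounds asserted above follow from simple interval rules: Round is monotone and never moves a point by more than 0.5, while Sign maps any interval into a sub-interval of [-1, 1] determined by the signs of the endpoints. A small sketch of those rules (assuming sign(x) = 1 for x >= 0 and -1 otherwise, which is one common convention; the production rules live in the NLR's layer code):

#include <cmath>
#include <cstdio>

int main()
{
    double lb = -6, ub = 8; // a pre-activation interval, as for x8 above

    // Round: monotone, and |round(x) - x| <= 0.5 for every x.
    std::printf( "round: [%g, %g]\n", std::nearbyint( lb ), std::nearbyint( ub ) );

    // Sign: collapses to {-1}, {1}, or [-1, 1] depending on the endpoints.
    double signLb = ( lb >= 0 ) ? 1 : -1;
    double signUb = ( ub >= 0 ) ? 1 : -1;
    std::printf( "sign: [%g, %g]\n", signLb, signUb );
    return 0;
}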
- void test_pmnr_bbps_relu_and_bilinear()
+ void test_pmnr_bbps_leaky_relu_and_sigmoid()
{
Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
@@ -11058,118 +2048,134 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
NLR::NetworkLevelReasoner nlr;
MockTableau tableau;
nlr.setTableau( &tableau );
- populateNetworkBackwardReluAndBilinear( nlr, tableau );
+ populateNetworkWithLeakyReluAndSigmoid( nlr, tableau );

- tableau.setLowerBound( 0, 0 );
+ tableau.setLowerBound( 0, -1 );
tableau.setUpperBound( 0, 1 );
- tableau.setLowerBound( 1, 0 );
+ tableau.setLowerBound( 1, -1 );
tableau.setUpperBound( 1, 1 );
-
// Invoke DeepPoly
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );

List<Tightening> expectedBounds( {
- Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
- Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ),
- Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ),
+ Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+ Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
+ Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
- Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
- Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
- Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
+ Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+ Tightening( 6, -5, Tightening::LB ), Tightening( 6, 5, Tightening::UB ),
+ Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
- Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ),
- Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ),
+ Tightening( 8, -6, Tightening::LB ), Tightening( 8, 8, Tightening::UB ),
+ Tightening( 9, -4, Tightening::LB ), Tightening( 9, 6, Tightening::UB ),
- Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ),
+ Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9997, Tightening::UB ),
+ Tightening( 11, 0.0180, Tightening::LB ), Tightening( 11, 0.9975, Tightening::UB ),
- Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ),
+ Tightening( 12, 0.0025, Tightening::LB ), Tightening( 12, 0.9997, Tightening::UB ),
+ Tightening( 13, 0.0564, Tightening::LB ), Tightening( 13, 3.9922, Tightening::UB ),
} );

- List<Tightening> bounds;
+ List<Tightening> bounds, newBounds;
TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
- // Invoke PMNR with BBPS heuristic for neuron selection
+ // Invoke PMNR with BBPS-based heuristic for neuron selection
TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );

- List<Tightening> expectedBounds2( {} );
+ List<Tightening> expectedBounds2( {
+ Tightening( 6, -0.5, Tightening::LB ),
+ Tightening( 7, -0.1, Tightening::LB ),
+ Tightening( 8, -1.5, Tightening::LB ),
+ Tightening( 8, 7.1, Tightening::UB ),
+ Tightening( 9, -1.1, Tightening::LB ),
+ Tightening( 9, 5.1, Tightening::UB ),
+ Tightening( 10, 0.0269, Tightening::LB ),
+ Tightening( 10, 0.9995, Tightening::UB ),
+ Tightening( 11, 0.1696, Tightening::LB ),
+ Tightening( 11, 0.9960, Tightening::UB ),
+ Tightening( 12, 0.0269, Tightening::LB ),
+ Tightening( 12, 0.9995, Tightening::UB ),
+ Tightening( 13, 0.5358, Tightening::LB ),
+ Tightening( 13, 3.9875, Tightening::UB ),
+ } );

- TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
+ bounds = removeRedundancies( newBounds );
TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );

+ List<Map<NLR::NeuronIndex, unsigned>> infeasibleBranches( {} );
+ List<Map<NLR::NeuronIndex, unsigned>> expectedInfeasibleBranches( {} );
+ TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
+ TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
+ }

- // Change the current bounds
- tableau.setLowerBound( 0, -3 );
- tableau.setUpperBound( 0, 1 );
- tableau.setLowerBound( 1, -1 );
- tableau.setUpperBound( 1, 2 );
+ void test_pmnr_bbps_softmax_and_max()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+ Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+ "backward-pmnr-bbps" );

- double large = 1000000;
- tableau.setLowerBound( 2, -large );
- tableau.setUpperBound( 2, large );
- tableau.setLowerBound( 3, -large );
- tableau.setUpperBound( 3, large );
- tableau.setLowerBound( 4, -large );
- tableau.setUpperBound( 4, large );
- tableau.setLowerBound( 5, -large );
- tableau.setUpperBound( 5, large );
- tableau.setLowerBound( 6, -large );
- tableau.setUpperBound( 6, large );
- tableau.setLowerBound( 7, -large );
- tableau.setUpperBound( 7, large );
- tableau.setLowerBound( 8, -large );
- tableau.setUpperBound( 8, large );
- tableau.setLowerBound( 9, -large );
- tableau.setUpperBound( 9, large );
- tableau.setLowerBound( 10, -large );
- tableau.setUpperBound( 10, large );
- tableau.setLowerBound( 11, -large );
- tableau.setUpperBound( 11, large );
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkWithSoftmaxAndMax( nlr, tableau );
+ tableau.setLowerBound( 0, -1 );
+ tableau.setUpperBound( 0, 1 );
+ tableau.setLowerBound( 1, -1 );
+ tableau.setUpperBound( 1, 1 );

// Invoke DeepPoly
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );

- List<Tightening> expectedBounds3( {
- Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
- Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
- Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ),
+ List<Tightening> expectedBounds( {
+ Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+ Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
+ Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
- Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
- Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
- Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
+ Tightening( 5, 0.0066, Tightening::LB ), Tightening( 5, 0.9517, Tightening::UB ),
+ Tightening( 6, 0.0007, Tightening::LB ), Tightening( 6, 0.9909, Tightening::UB ),
+ Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ),
- Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ),
- Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ),
+ Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ),
+ Tightening( 9, 0.3192, Tightening::LB ), Tightening( 9, 2.9819, Tightening::UB ),
- Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ),
+ Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ),
- Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ),
+ Tightening( 11, -2.9819, Tightening::LB ), Tightening( 11, -0.3192, Tightening::UB ),
} );

+ List<Tightening> bounds, newBounds;
TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
- TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
- // Invoke PMNR with BBPS heuristic for neuron selection
+ // Invoke PMNR with BBPS-based heuristic for neuron selection
TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );

- List<Tightening> expectedBounds4( {
- Tightening( 7, 0, Tightening::LB ),
+ List<Tightening> expectedBounds2( {
+ Tightening( 8, -0.6812, Tightening::LB ),
+ Tightening( 8, 1.8414, Tightening::UB ),
} );

- TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
- TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
+ bounds = removeRedundancies( newBounds );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
+
+ List<Map<NLR::NeuronIndex, unsigned>> infeasibleBranches( {} );
+ List<Map<NLR::NeuronIndex, unsigned>> expectedInfeasibleBranches( {} );
+ TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
+ TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
}

- void test_pmnr_bbps_relu_and_bilinear2()
+ void test_pmnr_bbps_relu_and_bilinear()
{
Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
@@ -11178,143 +2184,178 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
NLR::NetworkLevelReasoner nlr;
MockTableau tableau;
nlr.setTableau( &tableau );
- populateNetworkBackwardReluAndBilinear2( nlr, tableau );
+ populateNetworkWithReluAndBilinear( nlr, tableau );

tableau.setLowerBound( 0, -1 );
tableau.setUpperBound( 0, 1 );
tableau.setLowerBound( 1, -1 );
tableau.setUpperBound( 1, 1 );
- tableau.setLowerBound( 2, -1 );
- tableau.setUpperBound( 2, 1 );
-
// Invoke DeepPoly
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );

List<Tightening> expectedBounds( {
- Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
- Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
- Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ),
- Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ),
+ Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ),
+ Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
+ Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ),
- Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ),
- Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ),
- Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ),
- Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ),
+ Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ),
+ Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ),
+ Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ),
- Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ),
- Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ),
+ Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ),
+ Tightening( 9, -1, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
- Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ),
- Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ),
+ Tightening( 10, -7, Tightening::LB ), Tightening( 10, 35, Tightening::UB ),
- Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ),
+ Tightening( 11, -35, Tightening::LB ), Tightening( 11, 7, Tightening::UB ),
} );

- List<Tightening> bounds;
+ List<Tightening> bounds, newBounds;
TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
- // Invoke PMNR with BBPS heuristic for neuron selection
+ // Invoke PMNR with BBPS-based heuristic for neuron selection
TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );

- List<Tightening> expectedBounds2( {
- Tightening( 13, 0, Tightening::LB ),
- Tightening( 14, 0, Tightening::LB ),
- } );
+ List<Tightening> expectedBounds2( {} );

- TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+ TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
+ bounds = removeRedundancies( newBounds );
TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );

+ List<Map<NLR::NeuronIndex, unsigned>> infeasibleBranches( {} );
+ List<Map<NLR::NeuronIndex, unsigned>> expectedInfeasibleBranches( {} );
+ TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
+ TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
+ }
+
+ void test_pmnr_bbps_relu()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+ Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+ "backward-pmnr-bbps" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkMinimalReLU( nlr, tableau );

- // Change the current bounds
- tableau.setLowerBound( 0, -3 );
+ tableau.setLowerBound( 0, -1 );
tableau.setUpperBound( 0, 1 );
tableau.setLowerBound( 1, -1 );
- tableau.setUpperBound( 1, 2 );
- tableau.setLowerBound( 2, -2 );
- tableau.setUpperBound( 2, 2 );
+ tableau.setUpperBound( 1, 1 );

- double large = 1000000;
- tableau.setLowerBound( 3, -large );
- tableau.setUpperBound( 3, large );
- tableau.setLowerBound( 4, -large );
- tableau.setUpperBound( 4, large );
- tableau.setLowerBound( 5, -large );
- tableau.setUpperBound( 5, large );
- tableau.setLowerBound( 6, -large );
- tableau.setUpperBound( 6, large );
- tableau.setLowerBound( 7, -large );
- tableau.setUpperBound( 7, large );
- tableau.setLowerBound( 8, -large );
- tableau.setUpperBound( 8, large );
- tableau.setLowerBound( 9, -large );
- tableau.setUpperBound( 9, large );
- tableau.setLowerBound( 10, -large );
- tableau.setUpperBound( 10, large );
- tableau.setLowerBound( 11, -large );
- tableau.setUpperBound( 11, large );
- tableau.setLowerBound( 12, -large );
- tableau.setUpperBound( 12, large );
- tableau.setLowerBound( 13, -large );
- tableau.setUpperBound( 13, large );
- tableau.setLowerBound( 14, -large );
- tableau.setUpperBound( 14, large );
- tableau.setLowerBound( 15, -large );
- tableau.setUpperBound( 15, large );
+ // Invoke Parameterised DeepPoly
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly() );

+ /*
+ Input ranges:
- // Invoke DeepPoly
- TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
- TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
+ x0: [-1, 1]
+ x1: [-1, 1]
+
+ Layers 1, 2:
+
+ x2 = x0 + x1
+ x2.lb = x0 + x1 : [-2, 2]
+ x2.ub = x0 + x1 : [-2, 2]
+
+ x3 = x0 - x1
+ x3.lb = x0 - x1 : [-2, 2]
+ x3.ub = x0 - x1 : [-2, 2]

- List<Tightening> expectedBounds3( {
- Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ),
- Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ),
- Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ),
- Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
+ Both ReLUs are undecided, bounds are concretized. 2 = ub <= -lb = 2, using ReLU lower
+ coefficient of 0. Upper coefficient: 2/( 2--2 ) = 2/4 = 0.5

- Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
- Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
- Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
- Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
+ 0 <= x4 <= 0.5x2 + 1
+ x4.lb = 0
+ x4.ub = 0.5 ( x0 + x1 ) + 1 = 0.5x0 + 0.5x1 + 1
+ x4 range: [0, 2]

- Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ),
- Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ),
+ 0 <= x5 <= 0.5x3 + 1
+ x5.lb = 0
+ x5.ub = 0.5 ( x0 - x1 ) + 1 = 0.5x0 - 0.5x1 + 1
+ x5 range: [0, 2]

- Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ),
- Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ),
+ Layers 3, 4:

- Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ),
+ x6 = x4 + 2x5
+ x6.lb = 1 ( 0 ) + 2 ( 0 ) = 0 : [0, 0]
+ x6.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) + 2 ( 0.5x0 - 0.5x1 + 1 ) = 1.5 x0 - 0.5 x1 + 3 : [1, 5]
+ x6 range: [0, 5]
+
+ x7 = x5 + 1.5
+ x7.lb = 1 ( 0 ) + 1.5 = 1.5 : [1.5, 1.5]
+ x7.ub = 1 ( 0.5x0 - 0.5x1 + 1 ) + 1.5 = 0.5x0 - 0.5x1 + 2.5 : [1.5, 3.5]
+ x7 range: [1.5, 3.5]
+
+ Both ReLUs are active, so the bounds survive the activation.
+
+ x6 <= x8 <= x6
+ x8.lb = 0
+ x8.ub = 1.5 x0 - 0.5 x1 + 3
+ x8 range: [0, 5]
+
+ x7 <= x9 <= x7
+ x9.lb = 1.5
+ x9.ub = 0.5x0 - 0.5x1 + 2.5
+ x9 range: [1.5, 3.5]
+
+ Layer 5:
+
+ x10 = - x8 + x9 + 1
+ x10.lb = -1 ( x6 ) + 1 ( x7 ) + 1 = -1 ( x4 + 2x5 ) + 1 ( x5 + 1.5 ) + 1 = -x4 - x5 + 2.5
+ >= - ( 0.5x2 + 1 ) - ( 0.5x3 + 1 ) + 2.5 = -0.5x2 - 0.5x3 + 0.5 = -x0 + 0.5 >= -0.5 :
+ [-0.5, -0.5]
+ x10.ub = -1 ( x6 ) + 1 ( x7 ) + 1 = -1 ( x4 + 2x5 ) + 1 ( x5 + 1.5 ) + 1 = -x4 - x5 + 2.5
+ <= - ( 0 ) - ( 0 ) + 2.5 = 2.5 : [2.5, 2.5]
+ x10 range: [-0.5, 2.5]
+ */
+
+ List<Tightening> expectedBounds( {
+ Tightening( 2, -2, Tightening::LB ),
+ Tightening( 2, 2, Tightening::UB ),
+ Tightening( 3, -2, Tightening::LB ),
+ Tightening( 3, 2, Tightening::UB ),
+ Tightening( 4, 0, Tightening::LB ),
+ Tightening( 4, 2, Tightening::UB ),
+ Tightening( 5, 0, Tightening::LB ),
+ Tightening( 5, 2, Tightening::UB ),
+ Tightening( 6, 0, Tightening::LB ),
+ Tightening( 6, 5, Tightening::UB ),
+ Tightening( 7, 1.5, Tightening::LB ),
+ Tightening( 7, 3.5, Tightening::UB ),
+ Tightening( 8, 0, Tightening::LB ),
+ Tightening( 8, 5, Tightening::UB ),
+ Tightening( 9, 1.5, Tightening::LB ),
+ Tightening( 9, 3.5, Tightening::UB ),
+ Tightening( 10, -0.5, Tightening::LB ),
+ Tightening( 10, 2.5, Tightening::UB ),
} );

+ List<Tightening> bounds;
TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
- TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
-
+ TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
// Invoke PMNR with BBPS heuristic for neuron selection
TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );

- List<Tightening> expectedBounds4( {
- Tightening( 7, 0, Tightening::LB ),
-
- Tightening( 13, 0, Tightening::LB ),
- Tightening( 14, 0, Tightening::LB ),
- } );
+ List<Tightening> expectedBounds2( { Tightening( 10, 0.5000, Tightening::LB ) } );

TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
- TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+ TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
}
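Editor's note: the walkthrough above uses the standard DeepPoly ReLU relaxation: for an undecided neuron with pre-activation bounds [lb, ub], the upper line has slope ub/(ub - lb) through (lb, 0), and the lower line's slope is chosen as 0 or 1 by comparing ub against -lb. A minimal sketch of that coefficient arithmetic (an illustration of the rule as stated in the comment, not the NLR's actual implementation):

#include <cstdio>

int main()
{
    double lb = -2, ub = 2; // x2's bounds in the test above
    double upperCoef = ub / ( ub - lb );     // 2 / 4 = 0.5
    double lowerCoef = ( ub > -lb ) ? 1 : 0; // tie ub == -lb resolved to 0 here
    std::printf( "y <= %gx + %g, y >= %gx\n",
                 upperCoef, -upperCoef * lb, lowerCoef );
    // Prints y <= 0.5x + 1, y >= 0x: exactly the relaxation used for x4 above.
    return 0;
}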
bool boundsEqual( const List<Tightening> &bounds, const List<Tightening> &expectedBounds )
{
- if ( bounds.size() < expectedBounds.size() )
+ if ( bounds.size() != expectedBounds.size() )
return false;

bool allFound = true;
@@ -11333,38 +2374,78 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
return allFound;
}

- // Create list of all tightenings in newBounds for which there is no bound in newBounds or in
- // bounds which is at least as tight.
- List<Tightening> removeRedundancies( const List<Tightening> &bounds,
- const List<Tightening> &newBounds )
+ bool infeasibleBranchesEqual(
+ const List<Map<NLR::NeuronIndex, unsigned>> &infeasibleBranches,
+ const List<Map<NLR::NeuronIndex, unsigned>> &expectedInfeasibleBranches )
{
- List<Tightening> minimalBounds;
+ if ( infeasibleBranches.size() != expectedInfeasibleBranches.size() )
+ return false;

- for ( const auto &newBound : newBounds )
+ bool allFound = true;
+ for ( const auto &neuronToBranchIndex : infeasibleBranches )
{
- bool foundTighter = false;
- for ( const auto &bound : bounds )
+ bool currentFound = false;
+ for ( const auto &expectedNeuronToBranchIndex : expectedInfeasibleBranches )
+ {
+ currentFound |=
+ neuronToBranchIndexEqual( neuronToBranchIndex, expectedNeuronToBranchIndex );
+ }
+ allFound &= currentFound;
+ }
+ return allFound;
+ }
+
+ bool
+ neuronToBranchIndexEqual( const Map<NLR::NeuronIndex, unsigned> &neuronToBranchIndex,
+ const Map<NLR::NeuronIndex, unsigned> &expectedNeuronToBranchIndex )
+ {
+ if ( neuronToBranchIndex.size() != expectedNeuronToBranchIndex.size() )
+ return false;
+
+ bool allFound = true;
+ for ( const auto &pair : neuronToBranchIndex )
+ {
+ bool currentFound = false;
+ for ( const auto &expectedPair : expectedNeuronToBranchIndex )
{
- foundTighter |=
- ( newBound._type == bound._type && newBound._variable == bound._variable &&
- ( ( newBound._type == Tightening::LB &&
- FloatUtils::lte( newBound._value, bound._value, 0.0001 ) ) ||
- ( newBound._type == Tightening::UB &&
- FloatUtils::gte( newBound._value, bound._value, 0.0001 ) ) ) );
+ currentFound |= ( pair.first._layer == expectedPair.first._layer &&
+ pair.first._neuron == expectedPair.first._neuron &&
+ pair.second == expectedPair.second );
}
+ allFound &= currentFound;
+ }
+ return allFound;
+ }
+
+ // Create a list of all tightenings in newBounds for which no subsequent bound in newBounds
+ // is at least as tight.
+ List<Tightening> removeRedundancies( const List<Tightening> &newBounds )
+ {
+ List<Tightening> minimalBounds;
+ unsigned i = 0;
+ for ( const auto &newBound : newBounds )
+ {
+ bool foundTighter = false;
+ unsigned j = 0;
for ( const auto &bound : newBounds )
{
- foundTighter |=
- ( newBound._type == bound._type && newBound._variable == bound._variable &&
- ( ( newBound._type == Tightening::LB &&
- FloatUtils::lt( newBound._value, bound._value, 0.0001 ) ) ||
- ( newBound._type == Tightening::UB &&
- FloatUtils::gt( newBound._value, bound._value, 0.0001 ) ) ) );
+ if ( i < j )
+ {
+ foundTighter |=
+ ( newBound._type == bound._type && newBound._variable == bound._variable &&
+ ( ( newBound._type == Tightening::LB &&
+ FloatUtils::lte( newBound._value, bound._value, 0.0001 ) ) ||
+ ( newBound._type == Tightening::UB &&
+ FloatUtils::gte( newBound._value, bound._value, 0.0001 ) ) ) );
+ }
+ ++j;
}

if ( !foundTighter )
minimalBounds.append( newBound );
+
+ ++i;
}

return minimalBounds;
}
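Editor's note on removeRedundancies: because only later entries (j > i) can mark an earlier one redundant, the helper keeps, per variable and bound type, exactly the entries with no subsequent entry at least as tight; duplicates therefore survive only in their last occurrence. A hedged usage sketch relying solely on the helper and types defined in this suite:

// Hypothetical illustration: two lower bounds for the same variable.
List<Tightening> raw;
raw.append( Tightening( 13, 0, Tightening::LB ) );
raw.append( Tightening( 13, 0.5, Tightening::LB ) );
List<Tightening> minimal = removeRedundancies( raw );
// minimal holds only Tightening( 13, 0.5, Tightening::LB ): the earlier,
// looser lower bound has a later bound at least as tight and is dropped.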
diff --git a/src/nlr/tests/Test_PMNRSelection.h b/src/nlr/tests/Test_PMNRSelection.h
index 9b877270c1..22bef3be40 100644
--- a/src/nlr/tests/Test_PMNRSelection.h
+++ b/src/nlr/tests/Test_PMNRSelection.h
@@ -1,8 +1,8 @@
/********************* */
-/*! \file Test_NetworkLevelReasoner2.h
+/*! \file Test_PMNRSelection.h
** \verbatim
** Top contributors (to current version):
- ** Guy Katz, Andrew Wu
+ ** Guy Katz, Andrew Wu, Ido Shmuel
** This file is part of the Marabou project.
** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS
** in the top-level source directory) and their institutional affiliations.
@@ -144,13 +144,11 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
nlr.setWeight( 0, 0, 3, 0, -1 );
nlr.setWeight( 1, 0, 5, 0, 3 );

- nlr.setBias( 3, 0, 1 );
nlr.setBias( 5, 0, 1 );

// Mark the ReLU sources
nlr.addActivationSource( 1, 0, 2, 0 );
- nlr.addActivationSource( 3, 0, 4, 0 );

// Variable indexing
@@ -219,7 +217,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite

// Mark the ReLU sources
nlr.addActivationSource( 1, 0, 2, 0 );
- nlr.addActivationSource( 3, 0, 4, 0 );

// Variable indexing
@@ -350,6 +347,136 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
tableau.setUpperBound( 11, large );
}

+ void populateNetworkSBTAbsoluteValue( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
+ {
+ /*
+               2      R       1
+         x0 --- x2 ---> x4 --- x6
+           \    /              /
+            1  /              /
+             \/           -1 /
+             /\             /
+            3  \           /
+           /    \    R    /
+         x1 --- x3 ---> x5
+               1
+ */
+
+ // Create the layers
+ nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+ nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
+ nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 );
+ nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );
+
+ // Mark layer dependencies
+ for ( unsigned i = 1; i <= 3; ++i )
+ nlr.addLayerDependency( i - 1, i );
+
+ // Weights
+ nlr.setWeight( 0, 0, 1, 0, 2 );
+ nlr.setWeight( 0, 0, 1, 1, 1 );
+ nlr.setWeight( 0, 1, 1, 0, 3 );
+ nlr.setWeight( 0, 1, 1, 1, 1 );
+ nlr.setWeight( 2, 0, 3, 0, 1 );
+ nlr.setWeight( 2, 1, 3, 0, -1 );
+
+ // Mark the AbsoluteValue sources
+ nlr.addActivationSource( 1, 0, 2, 0 );
+ nlr.addActivationSource( 1, 1, 2, 1 );
+
+ // Variable indexing
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
+
+ // Very loose bounds for neurons except inputs
+ double large = 1000000;
+
+ tableau.getBoundManager().initialize( 7 );
+ tableau.setLowerBound( 2, -large );
+ tableau.setUpperBound( 2, large );
+ tableau.setLowerBound( 3, -large );
+ tableau.setUpperBound( 3, large );
+ tableau.setLowerBound( 4, -large );
+ tableau.setUpperBound( 4, large );
+ tableau.setLowerBound( 5, -large );
+ tableau.setUpperBound( 5, large );
+ tableau.setLowerBound( 6, -large );
+ tableau.setUpperBound( 6, large );
+ }
+
+ void populateNetworkSBTSign( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
+ {
+ /*
+               2      R       1
+         x0 --- x2 ---> x4 --- x6
+           \    /              /
+            1  \/          -1 /
+            3  /\             /
+           /    \    R    /
+         x1 --- x3 ---> x5
+               1
+ */
+
+ // Create the layers
+ nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+ nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 );
+ nlr.addLayer( 2, NLR::Layer::SIGN, 2 );
+ nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 );
+
+ // Mark layer dependencies
+ for ( unsigned i = 1; i <= 3; ++i )
+ nlr.addLayerDependency( i - 1, i );
+
+ // Weights
+ nlr.setWeight( 0, 0, 1, 0, 2 );
+ nlr.setWeight( 0, 0, 1, 1, 1 );
+ nlr.setWeight( 0, 1, 1, 0, 3 );
+ nlr.setWeight( 0, 1, 1, 1, 1 );
+ nlr.setWeight( 2, 0, 3, 0, 1 );
+ nlr.setWeight( 2, 1, 3, 0, -1 );
+
+ // Mark the Sign sources
+ nlr.addActivationSource( 1, 0, 2, 0 );
+ nlr.addActivationSource( 1, 1, 2, 1 );
+
+ // Variable indexing
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 );
+ nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+
+ nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 );
+
+ // Very loose bounds for neurons except inputs
+ double large = 1000000;
+
+ tableau.getBoundManager().initialize( 7 );
+ tableau.setLowerBound( 2, -large );
+ tableau.setUpperBound( 2, large );
+ tableau.setLowerBound( 3, -large );
+ tableau.setUpperBound( 3, large );
+ tableau.setLowerBound( 4, -large );
+ tableau.setUpperBound( 4, large );
+ tableau.setLowerBound( 5, -large );
+ tableau.setUpperBound( 5, large );
+ tableau.setLowerBound( 6, -large );
+ tableau.setUpperBound( 6, large );
+ }
+
void populateNetworkSBTLeakyReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
{
/*
@@ -497,6 +624,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
// Mark the Sigmoid sources
nlr.addActivationSource( 1, 0, 2, 0 );
nlr.addActivationSource( 1, 1, 2, 1 );
+
+ // Mark the Round sources
nlr.addActivationSource( 3, 0, 4, 0 );
nlr.addActivationSource( 3, 1, 4, 1 );
@@ -612,7 +741,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
{
/*
-
x0 x3 S x6

x1 x4 S x7
@@ -661,6 +789,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
nlr.setBias( 1, 1, 2 );
nlr.setBias( 1, 2, 3 );

+ // Mark the Softmax sources
nlr.addActivationSource( 1, 0, 2, 0 );
nlr.addActivationSource( 1, 1, 2, 0 );
nlr.addActivationSource( 1, 2, 2, 0 );
@@ -671,7 +800,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
nlr.addActivationSource( 1, 1, 2, 2 );
nlr.addActivationSource( 1, 2, 2, 2 );
-
// Variable indexing
nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
@@ -714,7 +842,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
{
/*
-
x0 x3 S x8

x1 x4 S x9
@@ -785,6 +912,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
nlr.setBias( 1, 3, 2 );
nlr.setBias( 1, 4, 1 );

+ // Mark the Softmax sources
nlr.addActivationSource( 1, 0, 2, 0 );
nlr.addActivationSource( 1, 2, 2, 0 );
nlr.addActivationSource( 1, 4, 2, 0 );
@@ -859,7 +987,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
{
/*
-
x0 x2
x x4 -- x5
x1 x3
@@ -888,10 +1015,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
nlr.setWeight( 0, 1, 1, 1, 1 );
nlr.setWeight( 2, 0, 3, 0, -1 );

+ // Mark the Bilinear sources
nlr.addActivationSource( 1, 0, 2, 0 );
nlr.addActivationSource( 1, 1, 2, 0 );
-
// Variable indexing
nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
@@ -931,9 +1058,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
tableau.setLowerBound( 1, 1 );
tableau.setUpperBound( 1, 5 );

- // Invoke initializeSymbolicBoundsMaps
+ // Invoke Parameterised DeepPoly
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
- TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() );
+ TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) );

/*
Input ranges:
@@ -1014,8 +1141,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
*/
comparePredecessorSymbolicBounds( nlr,
2,
- Vector<double>( { 1, 0, 0, 1 } ),
- Vector<double>( { 1, 0, 0, 1 } ),
+ Vector<double>( { 1, 1 } ),
+ Vector<double>( { 1, 1 } ),
Vector<double>( { 0, 0 } ),
Vector<double>( { 0, 0 } ) );
@@ -1043,21 +1170,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
Vector<double>( { 1, 2 } ),
Vector<double>( { 0 } ),
Vector<double>( { 0 } ) );
- {
- Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
- "backward-pmnr-random" );
- comparePMNRScores( nlr, Map<NLR::NeuronIndex, double>( {} ) );
- }
- {
- Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
- "backward-pmnr-gradient" );
- comparePMNRScores( nlr, Map<NLR::NeuronIndex, double>( {} ) );
- }
- {
- Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
- "backward-pmnr-bbps" );
- comparePMNRScores( nlr, Map<NLR::NeuronIndex, double>( {} ) );
- }
+
+ // Non-fixed activation neurons: None.
+ compareNonfixedNeurons( nlr, Set<NLR::NeuronIndex>( {} ) );
}

void test_symbolic_bound_maps_relus_active_and_inactive()
@@ -1077,9 +1192,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
// Strong negative bias for x2, which is node (1,0)
nlr.setBias( 1, 0, -30 );

- // Invoke initializeSymbolicBoundsMaps
+ // Invoke Parameterised DeepPoly
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
- TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() );
+ TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) );

/*
Input ranges:
@@ -1161,8 +1276,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
*/
comparePredecessorSymbolicBounds( nlr,
2,
- Vector<double>( { 0, 0, 0, 1 } ),
- Vector<double>( { 0, 0, 0, 1 } ),
+ Vector<double>( { 0, 1 } ),
+ Vector<double>( { 0, 1 } ),
Vector<double>( { 0, 0 } ),
Vector<double>( { 0, 0 } ) );
@@ -1190,6 +1305,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
Vector<double>( { -1, -1 } ),
Vector<double>( { 0 } ),
Vector<double>( { 0 } ) );
+
+ // Non-fixed activation neurons: None.
+ compareNonfixedNeurons( nlr, Set<NLR::NeuronIndex>( {} ) );
}

void test_symbolic_bound_maps_relus_active_and_not_fixed()
@@ -1209,9 +1327,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
// Strong negative bias for x2, which is node (1,0)
nlr.setBias( 1, 0, -15 );

- // Invoke initializeSymbolicBoundsMaps
+ // Invoke Parameterised DeepPoly
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
- TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() );
+ TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) );

/*
Input ranges:
@@ -1297,8 +1415,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
*/
comparePredecessorSymbolicBounds( nlr,
2,
- Vector<double>( { 1, 0, 0, 1 } ),
- Vector<double>( { 0.75, 0, 0, 1 } ),
+ Vector<double>( { 1, 1 } ),
+ Vector<double>( { 0.75, 1 } ),
Vector<double>( { 0, 0 } ),
Vector<double>( { 3, 0 } ) );
@@ -1326,6 +1444,99 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
Vector<double>( { 0.5, 1.25 } ),
Vector<double>( { -15 } ),
Vector<double>( { -8.25 } ) );
+
+ // Non-fixed activation neurons: x4 (RELU).
+ compareNonfixedNeurons( nlr, Set<NLR::NeuronIndex>( { NLR::NeuronIndex( 2, 0 ) } ) );
+ }
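Editor's note: the "Non-fixed activation neurons" assertions above hinge on a simple classification: a ReLU whose pre-activation interval straddles zero is undecided ("non-fixed") and is the only kind of neuron PMNR considers for selection; fixed-active or fixed-inactive ReLUs are skipped. A minimal sketch of that predicate (an illustration of the criterion as used in these tests, not the NLR's code):

#include <cstdio>

// A ReLU is non-fixed exactly when its pre-activation bounds straddle 0.
static bool isNonfixedRelu( double lb, double ub )
{
    return lb < 0 && ub > 0;
}

int main()
{
    std::printf( "%d\n", isNonfixedRelu( -4, 12 ) ); // x4 above: non-fixed
    std::printf( "%d\n", isNonfixedRelu( 0, 12 ) );  // fixed active: skipped
    return 0;
}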
+
+ void test_gradient_selection_relus_active_and_not_fixed()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+ Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+ "backward-pmnr-gradient" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkSBTRelu( nlr, tableau );
+
+ tableau.setLowerBound( 0, 4 );
+ tableau.setUpperBound( 0, 6 );
+ tableau.setLowerBound( 1, 1 );
+ tableau.setUpperBound( 1, 5 );
+
+ // Strong negative bias for x2, which is node (1,0)
+ nlr.setBias( 1, 0, -15 );
+
+ // Invoke Parameterised DeepPoly
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) );
+
+ /*
+ Calculating Gradient-based PMNR score of x4:
+ Symbolic bounds of output layer in terms of Layer 2:
+ x4 - x5 <= x6 <= x4 - x5.
+ x4 gradient: lower = ( 1 ), upper = ( 1 ), average = ( 1 ).
+ Gradient-based PMNR score of x4: ( 1 )^2 = 1.
+ */
+ comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 1 );
+ }
+
+ void test_bbps_selection_relus_active_and_not_fixed()
+ {
+ Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+ Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+ "backward-pmnr-bbps" );
+
+ NLR::NetworkLevelReasoner nlr;
+ MockTableau tableau;
+ nlr.setTableau( &tableau );
+ populateNetworkSBTRelu( nlr, tableau );
+
+ tableau.setLowerBound( 0, 4 );
+ tableau.setUpperBound( 0, 6 );
+ tableau.setLowerBound( 1, 1 );
+ tableau.setUpperBound( 1, 5 );
+
+ // Strong negative bias for x2, which is node (1,0)
+ nlr.setBias( 1, 0, -15 );
+
+ // Invoke Parameterised DeepPoly
+ TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+ TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) );
+
+ // Using branching point (x2, 0) for x4 (RELU).
+ compareBBPSBranchingPoints(
+ nlr,
+ NLR::NeuronIndex( 2, 0 ),
+ std::pair<NLR::NeuronIndex, double>( { NLR::NeuronIndex( 1, 0 ), 0 } ) );
+
+ /*
+ Lower branch symbolic bounds: 0 <= x4 <= 0.
+ Upper branch symbolic bounds: x2 <= x4 <= x2.
+ */
+ compareBranchSymbolicBounds( nlr,
+ NLR::NeuronIndex( 2, 0 ),
+ Vector<double>( { 0, 1 } ),
+ Vector<double>( { 0, 1 } ),
+ Vector<double>( { 0, 0 } ),
+ Vector<double>( { 0, 0 } ) );
+
+ /* Calculating BBPS-based PMNR score of x4:
+ Symbolic bounds of output layer in terms of Layer 2: x4 - x5 <= x6 <= x4 - x5.
+ Concretizing x5: x4 - 11 <= x6 <= x4 - 5.
+
+ Lower branch, using x2: [-4, 0], 0 <= x4 <= 0:
+ Output symbolic bounds -11 <= x6 <= -5.
+ Concrete bounds: [-11, -5]. DeepPoly bounds: [-9, 1]. Improvement: 6.
+
+ Upper branch, using x2: [0, 12], x2 <= x4 <= x2:
+ Output symbolic bounds x2 - 11 <= x6 <= x2 - 5.
+ Concrete bounds: [-11, 7]. DeepPoly bounds: [-9, 1]. Improvement: 0.
+
+ Final score = ( 6 + 0 ) / 2 = 3.
+ */
+ comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 3 );
}
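Editor's note: the BBPS improvement arithmetic in the comment above credits a branch only on the sides where it tightens the DeepPoly interval, i.e. improvement = max(0, dpUb - brUb) + max(0, brLb - dpLb), and the final score averages the branches. A minimal sketch reproducing the numbers in the comment (an editor's reconstruction of that arithmetic; the actual scoring code lives in the NLR sources):

#include <algorithm>
#include <cstdio>

// Credit only the sides where the branch tightens the DeepPoly interval.
static double improvement( double dpLb, double dpUb, double brLb, double brUb )
{
    return std::max( 0.0, dpUb - brUb ) + std::max( 0.0, brLb - dpLb );
}

int main()
{
    // x4's two branches from the comment above; DeepPoly gives x6 in [-9, 1].
    double lower = improvement( -9, 1, -11, -5 ); // 6
    double upper = improvement( -9, 1, -11, 7 );  // 0
    std::printf( "score = %g\n", ( lower + upper ) / 2 ); // 3, as asserted
    return 0;
}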
+ */ + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 3 ); } void test_symbolic_bound_maps_relus_active_and_externally_fixed() @@ -1348,9 +1559,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // However, one of the ReLU's variables has been eliminated nlr.eliminateVariable( 2, -3 ); - // Invoke initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); /* Input ranges: @@ -1431,8 +1642,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite */ comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 0, 0, 0, 1 } ), - Vector( { 0, 0, 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), Vector( { 0, 0 } ), Vector( { 0, 0 } ) ); @@ -1460,6 +1671,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { -1, -1 } ), Vector( { 0 } ), Vector( { 0 } ) ); + + // Non-fixed activation neurons: None. + compareNonfixedNeurons( nlr, Set( {} ) ); } void test_symbolic_bound_maps_relu_residual1() @@ -1474,9 +1688,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 0, -1 ); tableau.setUpperBound( 0, 1 ); - // Invoke initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); /* Input ranges: @@ -1624,61 +1838,185 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { 1 } ), Vector( { 2.5 } ), Vector( { 5 } ) ); + + // Non-fixed activation neurons: x2 (RELU), x4 (RELU). + compareNonfixedNeurons( + nlr, Set( { NLR::NeuronIndex( 2, 0 ), NLR::NeuronIndex( 4, 0 ) } ) ); } - void test_symbolic_bound_maps_relu_residual2() + void test_gradient_selection_relu_residual1() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); - populateNetworkSBTReluResidual2( nlr, tableau ); + populateNetworkSBTReluResidual1( nlr, tableau ); tableau.setLowerBound( 0, -1 ); tableau.setUpperBound( 0, 1 ); - // Invoke initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); /* - Input ranges: + Calculating Gradient-based PMNR score of x2: + Symbolic bounds of output layer in terms of Layer 2: + -3x2 - 2 <= x5 <= -2x2 + 10. + x2 gradient: lower = ( -3 ), upper = ( -2 ), average = ( -2.5 ). + Gradient-based PMNR score of x2: ( -2.5 )^2 = 6.25. + */ + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 6.25 ); - x0: [-1, 1] + /* + Calculating Gradient-based PMNR score of x4: + Symbolic bounds of output layer in terms of Layer 4: + 3x4 - 2 <= x5 <= 3x4 + 4. + x4 gradient: lower = ( 3 ), upper = ( 3 ), average = ( 3 ). + Gradient-based PMNR score of x4: ( 3 )^2 = 9. 
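+
+           Double-check with Python (arithmetic sketch only; pmnr_gradient_score() is
+           an illustrative helper for the average-then-squared-norm rule spelled out
+           above, not Marabou API):
+           ---
+           def pmnr_gradient_score(lower_grad, upper_grad):
+               # Average the lower/upper symbolic gradients, then take the squared norm.
+               avg = [(l + u) / 2 for l, u in zip(lower_grad, upper_grad)]
+               return sum(a * a for a in avg)
+
+           print(pmnr_gradient_score([-3], [-2]))  # x2: expected 6.25
+           print(pmnr_gradient_score([3], [3]))    # x4: expected 9
+           ---
+           [output]:
+           6.25
+           9.0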
+ */ + comparePMNRScores( nlr, NLR::NeuronIndex( 4, 0 ), 9 ); + } - Layers 1, 2: + void test_bbps_selection_relu_residual1() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); - x1 = x0 - x1.lb = x0 : [-1, 1] - x1.ub = x0 : [-1, 1] + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTReluResidual1( nlr, tableau ); - ReLU is undecided, bound is concretized. 1 = ub <= -lb = 1, using ReLU lower - coefficient of 0. Upper coefficient: 1/( 1--1 ) = 1/2 = 0.5 + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); - 0.5 x1 <= x2 <= 0.5x1 + 0.5 - x2.lb = 0 - x2.ub = 0.5x0 + 0.5 - x2 range: [0, 1] + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); - Layers 3, 4 (with residual from x0): + // Using branching point (x1, 0) for x2 (RELU). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 2, 0 ), + std::pair( { NLR::NeuronIndex( 1, 0 ), 0 } ) ); - x3 = - x2 - x0 + 1 - x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 2] - x3.ub = -1( 0 ) -1x0 + 1 = -x0 + 1 : [0, 2] - x3 range: [-1, 2] + // Using branching point (x3, 0) for x4 (RELU). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 4, 0 ), + std::pair( { NLR::NeuronIndex( 3, 0 ), 0 } ) ); - ReLU is undecided, bound is concretized. 2 = ub > -lb = 1, using ReLU lower - coefficient of 1. Upper coefficient: 2/( 2--1 ) = 2/3. + /* + Lower branch symbolic bounds: 0 <= x2 <= 0. + Upper branch symbolic bounds: x1 <= x2 <= x1. + */ + compareBranchSymbolicBounds( nlr, + NLR::NeuronIndex( 2, 0 ), + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); - x3 <= x4 <= 2/3 x3 + 2/3 - x4.lb = -1.5x0 + 0.5 - x4.ub = 2/3 ( -x0 + 1 ) + 2/3 = -2/3 x0 + 4/3 : [1, 2] - x4 range: [-1, 2] + /* + Lower branch symbolic bounds: 0 <= x4 <= 0. + Upper branch symbolic bounds: x3 <= x4 <= x3. + */ + compareBranchSymbolicBounds( nlr, + NLR::NeuronIndex( 4, 0 ), + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); - Layer 5 (with residual from x0): + /* Calculating BBPS-based PMNR score of x2: + Symbolic bounds of output layer in terms of Layer 2: -3x2 - 2 <= x5 <= -2x2 + 10. - x5 = 3x4 + x0 + 1 + Lower branch, using x1: [-1, 0], 0 <= x4 <= 0: + Output symbolic bounds -2 <= x5 <= 10. + Concrete bounds: [-2, 10]. DeepPoly bounds: [1, 6]. Improvement: 0. + + Upper branch, using x1: [0, 1], x2 <= x4 <= x2: + Output symbolic bounds -3x1 - 2 <= x5 <= -2x1 + 10. + Concrete bounds: [-5, 10]. DeepPoly bounds: [1, 6]. Improvement: 0. + + Final score = ( 0 + 0 ) / 2 = 0. + */ + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 0 ); + + /* Calculating BBPS-based PMNR score of x4: + Symbolic bounds of output layer in terms of Layer 4: 3x4 - 2 <= x5 <= 3x4 + 4. + + Lower branch, using x3: [-1, 0], 0 <= x4 <= 0: + Output symbolic bounds -2 <= x5 <= 4. + Concrete bounds: [-2, 4]. DeepPoly bounds: [1, 6]. Improvement: 2. + + Upper branch, using x3: [0, 2], x2 <= x4 <= x2: + Output symbolic bounds 3x3 - 2 <= x5 <= 3x3 + 4. + Concrete bounds: [-2, 10]. DeepPoly bounds: [1, 6]. Improvement: 0. + + Final score = ( 0 + 2 ) / 2 = 1. 
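+
+           Double-check with Python (arithmetic sketch; improvement() mirrors the
+           interval-tightening measure used above and is not Marabou API):
+           ---
+           def improvement(branch, dp):
+               return max(branch[0] - dp[0], 0) + max(dp[1] - branch[1], 0)
+
+           dp = (1, 6)  # DeepPoly bounds of the output
+           print((improvement((-2, 10), dp) + improvement((-5, 10), dp)) / 2)  # x2: 0.0
+           print((improvement((-2, 4), dp) + improvement((-2, 10), dp)) / 2)   # x4: 1.0
+           ---
+           [output]:
+           0.0
+           1.0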
+ */ + comparePMNRScores( nlr, NLR::NeuronIndex( 4, 0 ), 1 ); + } + + void test_symbolic_bound_maps_relu_residual2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTReluResidual2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + /* + Input ranges: + + x0: [-1, 1] + + Layers 1, 2: + + x1 = x0 + x1.lb = x0 : [-1, 1] + x1.ub = x0 : [-1, 1] + + ReLU is undecided, bound is concretized. 1 = ub <= -lb = 1, using ReLU lower + coefficient of 0. Upper coefficient: 1/( 1--1 ) = 1/2 = 0.5 + + 0.5 x1 <= x2 <= 0.5x1 + 0.5 + x2.lb = 0 + x2.ub = 0.5x0 + 0.5 + x2 range: [0, 1] + + Layers 3, 4 (with residual from x0): + + x3 = - x2 - x0 + 1 + x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 2] + x3.ub = -1( 0 ) -1x0 + 1 = -x0 + 1 : [0, 2] + x3 range: [-1, 2] + + ReLU is undecided, bound is concretized. 2 = ub > -lb = 1, using ReLU lower + coefficient of 1. Upper coefficient: 2/( 2--1 ) = 2/3. + + x3 <= x4 <= 2/3 x3 + 2/3 + x4.lb = -1.5x0 + 0.5 + x4.ub = 2/3 ( -x0 + 1 ) + 2/3 = -2/3 x0 + 4/3 : [1, 2] + x4 range: [-1, 2] + + Layer 5 (with residual from x0): + + x5 = 3x4 + x0 + 1 x5.lb = 3 ( -1.5x0 + 0.5 ) + 1 ( x0 ) + 1 = -3.5x0 + 2.5 : [-1, 6] x5.ub = 3 ( -2/3 x0 + 4/3 ) + 1 ( x0 ) + 1 = -x0 + 5 : [4, 6] x5 range: [-1, 6] @@ -1806,6 +2144,130 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { -1 } ), Vector( { 2.5 } ), Vector( { 5 } ) ); + + // Non-fixed activation neurons: x2 (RELU), x4 (RELU). + compareNonfixedNeurons( + nlr, Set( { NLR::NeuronIndex( 2, 0 ), NLR::NeuronIndex( 4, 0 ) } ) ); + } + + void test_gradient_selection_relu_residual2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTReluResidual2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + /* + Calculating Gradient-based PMNR score of x2: + Symbolic bounds of output layer in terms of Layer 2: + -3x2 + 2 <= x5 <= -2x2 + 6. + x4 gradient: lower = ( -3 ), upper = ( -2 ), average = ( -2.5 ). + Gradient-based PMNR score of x2: ( -2.5 )^2 = 6.25. + */ + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 6.25 ); + + /* + Calculating Gradient-based PMNR score of x4: + Symbolic bounds of output layer in terms of Layer 4: + 3x4 <= x5 <= 3x4 + 2. + x4 gradient: lower = ( 3 ), upper = ( 3 ), average = ( 3 ). + Gradient-based PMNR score of x4: ( 3 )^2 = 9. 
+ */ + comparePMNRScores( nlr, NLR::NeuronIndex( 4, 0 ), 9 ); + } + + void test_bbps_selection_relu_residual2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTReluResidual2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + // Using branching point (x1, 0) for x2 (RELU). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 2, 0 ), + std::pair( { NLR::NeuronIndex( 1, 0 ), 0 } ) ); + + // Using branching point (x3, 0) for x4 (RELU). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 4, 0 ), + std::pair( { NLR::NeuronIndex( 3, 0 ), 0 } ) ); + + /* + Lower branch symbolic bounds: 0 <= x2 <= 0. + Upper branch symbolic bounds: x1 <= x2 <= x1. + */ + compareBranchSymbolicBounds( nlr, + NLR::NeuronIndex( 2, 0 ), + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + /* + Lower branch symbolic bounds: 0 <= x4 <= 0. + Upper branch symbolic bounds: x3 <= x4 <= x3. + */ + compareBranchSymbolicBounds( nlr, + NLR::NeuronIndex( 4, 0 ), + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + /* Calculating BBPS-based PMNR score of x2: + Symbolic bounds of output layer in terms of Layer 2: -3x2 + 2 <= x6 <= -2x2 + 6. + + Lower branch, using x1: [-1, 0], 0 <= x4 <= 0: + Output symbolic bounds 2 <= x6 <= 6. + Concrete bounds: [2, 6]. DeepPoly bounds: [-1, 6]. Improvement: 3. + + Upper branch, using x1: [0, 1], x2 <= x4 <= x2: + Output symbolic bounds -3x1 + 2 <= x6 <= -2x1 + 6. + Concrete bounds: [-1, 6]. DeepPoly bounds: [-1, 6]. Improvement: 0. + + Final score = ( 3 + 0 ) / 2 = 1.5. + */ + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 1.5 ); + + /* Calculating BBPS-based PMNR score of x4: + Symbolic bounds of output layer in terms of Layer 4: 3x4 <= x6 <= 3x4 + 2. + + Lower branch, using x3: [0, 1], 0 <= x4 <= 0: + Output symbolic bounds 0 <= x6 <= 2. + Concrete bounds: [0, 2]. DeepPoly bounds: [-1, 6]. Improvement: 5. + + Upper branch, using x3: [0, 2], x2 <= x4 <= x2: + Output symbolic bounds 3x3 <= x6 <= 3x3 + 2. + Concrete bounds: [0, 8]. DeepPoly bounds: [-1, 6]. Improvement: 1. + + Final score = ( 5 + 1 ) / 2 = 3. 
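+
+           Double-check with Python (same arithmetic sketch as for the first residual
+           network; improvement() is an illustrative helper, not Marabou API):
+           ---
+           def improvement(branch, dp):
+               return max(branch[0] - dp[0], 0) + max(dp[1] - branch[1], 0)
+
+           dp = (-1, 6)  # DeepPoly bounds of the output
+           print((improvement((2, 6), dp) + improvement((-1, 6), dp)) / 2)  # x2: 1.5
+           print((improvement((0, 2), dp) + improvement((0, 8), dp)) / 2)   # x4: 3.0
+           ---
+           [output]:
+           1.5
+           3.0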
+ */ + comparePMNRScores( nlr, NLR::NeuronIndex( 4, 0 ), 3 ); } void test_symbolic_bound_maps_relu_reindex() @@ -1822,9 +2284,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 1, -1 ); tableau.setUpperBound( 1, 1 ); - // Invoke initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); /* Input ranges: @@ -1961,15 +2423,15 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite */ comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 0, 0, 0, 0 } ), - Vector( { 0, 0.5, 0.5, 0 } ), + Vector( { 0, 0 } ), + Vector( { 0.5, 0.5 } ), Vector( { 0, 0 } ), Vector( { 1, 1 } ) ); comparePredecessorSymbolicBounds( nlr, 4, - Vector( { 0, 1, 0, 0 } ), - Vector( { 0, 1, 0.5, 0 } ), + Vector( { 0, 1 } ), + Vector( { 0.5, 1 } ), Vector( { 0, 0 } ), Vector( { 1, 0 } ) ); @@ -2009,73 +2471,214 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { 1, 0.25, 0.5, 0.25 } ), Vector( { 1, 0 } ), Vector( { 4, 1.5 } ) ); + + // Non-fixed activation neurons: x4 (RELU), x5 (RELU), x9 (RELU). + compareNonfixedNeurons( nlr, + Set( { NLR::NeuronIndex( 2, 0 ), + NLR::NeuronIndex( 2, 1 ), + NLR::NeuronIndex( 4, 0 ) } ) ); } - void test_symbolic_bound_maps_abs_all_positive() + void test_gradient_selection_relu_reindex() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); nlr.setTableau( &tableau ); + populateNetworkSBTReluReindex( nlr, tableau ); - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); + /* + Calculating Gradient-based PMNR score of x4: + Symbolic bounds of output layer in terms of Layer 2: + x4 + x5 + 1 <= x10 <= 1.5x4 + 0.5x5 + 2, 0 <= x11 <= 0.5x4 - 0.5x5 + 1. + x4 gradient: lower = ( 1, 0 ), upper = ( 1.5, 0.5 ), average = ( 1.25, 0.25 ). + Gradient-based PMNR score of x4: ( 1.25 )^2 + ( 0.25 )^2 = 1.625. + */ + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 0.625 ); - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); + /* + Calculating Gradient-based PMNR score of x5: + Symbolic bounds of output layer in terms of Layer 2: + x4 + x5 + 1 <= x10 <= 1.5x4 + 0.5x5 + 2, 0 <= x11 <= 0.5x4 - 0.5x5 + 1. + x5 gradient: lower = ( 1, 0 ), upper = ( 0.5, -0.5 ), average = ( 0.75, -0.25 ). + Gradient-based PMNR score of x5: ( 0.75 )^2 + ( -0.25 )^2 = 0.625. 
+ */ + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 1.625 ); - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + /* + Calculating Gradient-based PMNR score of x9: + Symbolic bounds of output layer in terms of Layer 4: + x8 + x9 + 1 <= x10 <= x8 + x9 + 1, x9 <= x11 <= x9. + x9 gradient: lower = ( 1, 1 ), upper = ( 1, 1 ), average = ( 1, 1 ). + Gradient-based PMNR score of x9: ( ( 1 )^2 + ( 1 )^2 ) / 2 = 2. + */ + comparePMNRScores( nlr, NLR::NeuronIndex( 4, 0 ), 2 ); + } - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + void test_bbps_selection_relu_reindex() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTReluReindex( nlr, tableau ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); - // Very loose bounds for neurons except inputs - double large = 1000000; + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); + // Using branching point (x2, 0) for x4 (RELU). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 2, 0 ), + std::pair( { NLR::NeuronIndex( 1, 1 ), 0 } ) ); + + // Using branching point (x3, 0) for x5 (RELU). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 2, 1 ), + std::pair( { NLR::NeuronIndex( 1, 0 ), 0 } ) ); + + // Using branching point (x7, 0) for x9 (RELU). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 4, 0 ), + std::pair( { NLR::NeuronIndex( 3, 1 ), 0 } ) ); + + /* + Lower branch symbolic bounds: 0 <= x4 <= 0. + Upper branch symbolic bounds: x2 <= x4 <= x2. + */ + compareBranchSymbolicBounds( nlr, + NLR::NeuronIndex( 2, 0 ), + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + /* + Lower branch symbolic bounds: 0 <= x5 <= 0. + Upper branch symbolic bounds: x3 <= x5 <= x3. + */ + compareBranchSymbolicBounds( nlr, + NLR::NeuronIndex( 2, 1 ), + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + /* + Lower branch symbolic bounds: 0 <= x9 <= 0. + Upper branch symbolic bounds: x7 <= x9 <= x7. + */ + compareBranchSymbolicBounds( nlr, + NLR::NeuronIndex( 4, 0 ), + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + /* Calculating BBPS-based PMNR score of x4: + Symbolic bounds of output layer in terms of Layer 2: + x4 + x5 + 1 <= x10 <= 1.5x4 + 0.5x5 + 2, 0 <= x11 <= 0.5x4 - 0.5x5 + 1. + Concretizing x5: x4 + 1 <= x10 <= 1.5x4 + 3, 0 <= x11 <= 0.5x4 + 1. 
+ + Lower branch, using x2: [-2, 0], 0 <= x4 <= 0: Output symbolic bounds: + 1 <= x10 <= 3, 0 <= x11 <= 1. + Concrete bounds: x10: [1, 3], x11: [0, 1]. + DeepPoly bounds: x10: [1, 5.5], x11: [0, 2]. + Improvement: 2.5 + 1 = 3.5. + + Upper branch, using x2: [0, 2], x2 <= x4 <= x2: Output symbolic bounds: + x2 + 1 <= x10 <= 1.5x2 + 3, 0 <= x11 <= 0.5x2 + 1. + Concrete bounds: x10: [1, 6], x11: [0, 2]. + DeepPoly bounds: x10: [1, 5.5], x11: [0, 2]. + Improvement: 0 + 0 = 0. + + Final score = ( 3.5 + 0 ) / 2 = 1.75. + */ + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 1.75 ); + + /* Calculating BBPS-based PMNR score of x5: + Symbolic bounds of output layer in terms of Layer 2: + x4 + x5 + 1 <= x10 <= 1.5x4 + 0.5x5 + 2, 0 <= x11 <= 0.5x4 - 0.5x5 + 1. + Concretizing x4: x5 + 1 <= x10 <= 0.5x5 + 5, 0 <= x11 <= -0.5x5 + 2. + + Lower branch, using x3: [-2, 0], 0 <= x5 <= 0: Output symbolic bounds: + 1 <= x10 <= 5, 0 <= x11 <= 2. + Concrete bounds: x10: [1, 5], x11: [0, 2]. + DeepPoly bounds: x10: [1, 5.5], x11: [0, 2]. + Improvement: 0.5 + 0 = 0.5. + + Upper branch, using x3: [0, 2], x3 <= x5 <= x3: Output symbolic bounds: + x3 + 1 <= x10 <= 0.5x3 + 5, 0 <= x11 <= -0.5x3 + 2. + Concrete bounds: x10: [1, 6], x11: [0, 2]. + DeepPoly bounds: x10: [1, 5.5], x11: [0, 2]. + Improvement: 0 + 0 = 0. + + Final score = ( 0.5 + 0 ) / 2 = 0.25. + */ + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 0.25 ); + + /* Calculating BBPS-based PMNR score of x9: + Symbolic bounds of output layer in terms of Layer 4: + x8 + x9 + 1 <= x10 <= x8 + x9 + 1, x9 <= x11 <= x9. + Concretizing x8: x9 + 1 <= x10 <= x9 + 4, x9 <= x11 <= x9. + + Lower branch, using x7: [-2, 0], 0 <= x9 <= 0: + Output symbolic bounds: 1 <= x10 <= 4, 0 <= x11 <= 0. + Concrete bounds: x10: [1, 4], x11: [0, 0]. + DeepPoly bounds: x10: [1, 5.5], x11: [0, 2]. + Improvement: 1.5 + 2 = 3.5. + + Lower branch, using x7: [0, 2], 0 <= x9 <= 0: + Output symbolic bounds: x7 + 1 <= x10 <= x7 + 4, x7 <= x11 <= x7. + Concrete bounds: x10: [1, 6], x11: [0, 2]. + DeepPoly bounds: x10: [1, 5.5], x11: [0, 2]. + Improvement: 0 + 0 = 0. + + Final score = ( 3.5 + 0 ) / 2 = 1.75. + */ + comparePMNRScores( nlr, NLR::NeuronIndex( 4, 0 ), 1.75 ); + } + + void test_symbolic_bound_maps_absolute_values_all_positive() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTAbsoluteValue( nlr, tableau ); tableau.setLowerBound( 0, 4 ); tableau.setUpperBound( 0, 6 ); tableau.setLowerBound( 1, 1 ); tableau.setUpperBound( 1, 5 ); - // Invoke initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); /* Input ranges: @@ -2155,8 +2758,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite */ comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 1, 0, 0, 1 } ), - Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 1 } ), + Vector( { 1, 1 } ), Vector( { 0, 0 } ), Vector( { 0, 0 } ) ); @@ -2184,82 +2787,37 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { 1, 2 } ), Vector( { 0 } ), Vector( { 0 } ) ); + + // Non-fixed activation neurons: None. 
+ compareNonfixedNeurons( nlr, Set( {} ) ); } - void test_symbolic_bound_maps_abs_positive_and_negative() + void test_symbolic_bound_maps_absolute_values_positive_and_negative() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); nlr.setTableau( &tableau ); + populateNetworkSBTAbsoluteValue( nlr, tableau ); - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + /* + Input ranges: - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -30 ); - - // Invoke initializeSymbolicBoundsMaps - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] + x0: [4, 6] + x1: [1, 5] Layers 1, 2: x2 = 2x0 + 3x1 - 30 @@ -2333,8 +2891,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite */ comparePredecessorSymbolicBounds( nlr, 2, - Vector( { -1, 0, 0, 1 } ), - Vector( { -1, 0, 0, 1 } ), + Vector( { -1, 1 } ), + Vector( { -1, 1 } ), Vector( { 0, 0 } ), Vector( { 0, 0 } ) ); @@ -2362,6 +2920,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { -3, -4 } ), Vector( { 30 } ), Vector( { 30 } ) ); + + // Non-fixed activation neurons: None. 
+ compareNonfixedNeurons( nlr, Set( {} ) ); } void test_symbolic_bound_maps_absolute_values_positive_and_not_fixed() @@ -2370,56 +2931,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); + populateNetworkSBTAbsoluteValue( nlr, tableau ); tableau.setLowerBound( 0, 4 ); tableau.setUpperBound( 0, 6 ); @@ -2429,9 +2942,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Strong negative bias for x2, which is node (1,0) nlr.setBias( 1, 0, -15 ); - // Invoke initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); /* Input ranges: @@ -2515,8 +3028,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite */ comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 0, 0, 0, 1 } ), - Vector( { 0, 0, 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), Vector( { 0, 0 } ), Vector( { 12, 0 } ) ); @@ -2544,64 +3057,109 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { -1, -1 } ), Vector( { 0 } ), Vector( { 12 } ) ); + + // Non-fixed activation neurons: x4 (ABSOLUTE_VALUE). 
+ compareNonfixedNeurons( nlr, Set( { NLR::NeuronIndex( 2, 0 ) } ) ); } - void test_symbolic_bound_maps_absolute_values_active_and_externally_fixed() + void test_gradient_selection_absolute_values_positive_and_not_fixed() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); nlr.setTableau( &tableau ); + populateNetworkSBTAbsoluteValue( nlr, tableau ); - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); + /* + Calculating Gradient-based PMNR score of x4: + Symbolic bounds of output layer in terms of Layer 2: + x4 - x5 <= x6 <= x4 - x5. + x4 gradient: lower = ( 1 ), upper = ( 1 ), average = ( 1 ). + Gradient-based PMNR score of x4: ( 1 )^2 = 1. 
+ */ + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 1 ); + } - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + void test_bbps_selection_absolute_values_positive_and_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTAbsoluteValue( nlr, tableau ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); - // Very loose bounds for neurons except inputs - double large = 1000000; + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); + // Using branching point (x2, 0) for x4 (ABSOLUTE_VALUE). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 2, 0 ), + std::pair( { NLR::NeuronIndex( 1, 0 ), 0 } ) ); + + /* + Lower branch symbolic bounds: -x2 <= x4 <= -x2. + Upper branch symbolic bounds: x2 <= x4 <= x2. + */ + compareBranchSymbolicBounds( nlr, + NLR::NeuronIndex( 2, 0 ), + Vector( { -1, 1 } ), + Vector( { -1, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + /* Calculating BBPS-based PMNR score of x4: + Symbolic bounds of output layer in terms of Layer 2: x4 - x5 <= x6 <= x4 - x5. + Concretizing x5: x4 - 11 <= x6 <= x4 - 5. + + Lower branch, using x2: [-4, 0], -x2 <= x4 <= -x2: + Output symbolic bounds -x2 - 11 <= x6 <= -x2 - 5. + Concrete bounds: [-11, -1]. DeepPoly bounds: [-11, 7]. Improvement: 8. + + Upper branch, using x2: [0, 12], x2 <= x4 <= x2: + Output symbolic bounds x2 - 11 <= x6 <= x2 - 5. + Concrete bounds: [-11, 7]. DeepPoly bounds: [-11, 7]. Improvement: 0. + + Final score = ( 8 + 0 ) / 2 = 4. 
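+
+           Double-check with Python (arithmetic sketch; line_range() and improvement()
+           are illustrative helpers, not Marabou API):
+           ---
+           def line_range(c, b, lb, ub):
+               # Concrete range of c*x + b over x in [lb, ub].
+               return (min(c * lb + b, c * ub + b), max(c * lb + b, c * ub + b))
+
+           def improvement(branch, dp):
+               return max(branch[0] - dp[0], 0) + max(dp[1] - branch[1], 0)
+
+           dp = (-11, 7)
+           # Lower branch: -x2 - 11 <= x6 <= -x2 - 5 over x2 in [-4, 0].
+           low = (line_range(-1, -11, -4, 0)[0], line_range(-1, -5, -4, 0)[1])
+           # Upper branch: x2 - 11 <= x6 <= x2 - 5 over x2 in [0, 12].
+           up = (line_range(1, -11, 0, 12)[0], line_range(1, -5, 0, 12)[1])
+           print(low, up, (improvement(low, dp) + improvement(up, dp)) / 2)
+           ---
+           [output]:
+           (-11, -1) (-11, 7) 4.0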
+ */ + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 4 ); + } + + void test_symbolic_bound_maps_absolute_values_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTAbsoluteValue( nlr, tableau ); tableau.setLowerBound( 0, 4 ); tableau.setUpperBound( 0, 6 ); @@ -2614,9 +3172,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // However, the weighted sum variable has been eliminated nlr.eliminateVariable( 2, -3 ); - // Invoke initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); /* Input ranges: @@ -2699,8 +3257,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite */ comparePredecessorSymbolicBounds( nlr, 2, - Vector( { -1, 0, 0, 1 } ), - Vector( { -1, 0, 0, 1 } ), + Vector( { -1, 1 } ), + Vector( { -1, 1 } ), Vector( { 0, 0 } ), Vector( { 0, 0 } ) ); @@ -2728,6 +3286,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { -1, -1 } ), Vector( { 3 } ), Vector( { 3 } ) ); + + // Non-fixed activation neurons: None. + compareNonfixedNeurons( nlr, Set( {} ) ); } void test_symbolic_bound_maps_signs_positive_and_not_fixed() @@ -2736,56 +3297,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); + populateNetworkSBTSign( nlr, tableau ); tableau.setLowerBound( 0, 4 ); tableau.setUpperBound( 0, 6 ); @@ -2795,9 +3308,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Strong negative bias for x2, which is node (1,0) nlr.setBias( 1, 0, -15 ); - // Invoke initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly 
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); /* Input ranges: @@ -2885,8 +3398,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite */ comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 0.1667, 0, 0, 0 } ), - Vector( { 0.5, 0, 0, 0 } ), + Vector( { 0.1667, 0 } ), + Vector( { 0.5, 0 } ), Vector( { -1, 1 } ), Vector( { 1, 1 } ) ); @@ -2914,64 +3427,109 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { 1, 1.5 } ), Vector( { -4.5 } ), Vector( { -7.5 } ) ); + + // Non-fixed activation neurons: x4 (SIGN). + compareNonfixedNeurons( nlr, Set( { NLR::NeuronIndex( 2, 0 ) } ) ); } - void test_symbolic_bound_maps_signs_active_and_externally_fixed() + void test_gradient_selection_signs_positive_and_not_fixed() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); nlr.setTableau( &tableau ); + populateNetworkSBTSign( nlr, tableau ); - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); - // Mark the Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); + /* + Calculating Gradient-based PMNR score of x4: + Symbolic bounds of output layer in terms of Layer 2: + x4 - x5 <= x6 <= x4 - x5. + x4 gradient: lower = ( 1 ), upper = ( 1 ), average = ( 1 ). + Gradient-based PMNR score of x4: ( 1 )^2 = 1. 
+ */ + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 1 ); + } - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + void test_bbps_selection_signs_positive_and_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSign( nlr, tableau ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); - // Very loose bounds for neurons except inputs - double large = 1000000; + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); + // Using branching point (x2, 0) for x4 (SIGN). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 2, 0 ), + std::pair( { NLR::NeuronIndex( 1, 0 ), 0 } ) ); + + /* + Lower branch symbolic bounds: -1 <= x4 <= -1. + Upper branch symbolic bounds: 1 <= x4 <= 1. + */ + compareBranchSymbolicBounds( nlr, + NLR::NeuronIndex( 2, 0 ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ), + Vector( { -1, 1 } ), + Vector( { -1, 1 } ) ); + + /* Calculating BBPS-based PMNR score of x4: + Symbolic bounds of output layer in terms of Layer 2: x4 - x5 <= x6 <= x4 - x5. + Concretizing x5: x4 - 11 <= x6 <= x4 - 5. + + Lower branch, using x2: [-4, 0], -1 <= x4 <= -1: + Output symbolic bounds -2 <= x6 <= -2. + Concrete bounds: [-2, -2]. DeepPoly bounds: [-2, 0]. Improvement: 2. + + Upper branch, using x2: [0, 12], 1 <= x4 <= 1: + Output symbolic bounds 0 <= x6 <= 0. + Concrete bounds: [0, 0]. DeepPoly bounds: [-2, 0]. Improvement: 2. + + Final score = ( 2 + 2 ) / 2 = 2. 
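+
+           Double-check with Python (arithmetic sketch; note that x3 = x0 + x1 is
+           always positive here, so x5 is fixed to 1 and x6 = x4 - 1 on both branches;
+           improvement() is an illustrative helper, not Marabou API):
+           ---
+           def improvement(branch, dp):
+               return max(branch[0] - dp[0], 0) + max(dp[1] - branch[1], 0)
+
+           dp = (-2, 0)
+           low = (-1 - 1, -1 - 1)  # lower branch: x4 = -1, so x6 = -2
+           up = (1 - 1, 1 - 1)     # upper branch: x4 = 1, so x6 = 0
+           print((improvement(low, dp) + improvement(up, dp)) / 2)  # expected 2.0
+           ---
+           [output]:
+           2.0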
+ */ + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 2 ); + } + + void test_symbolic_bound_maps_signs_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSign( nlr, tableau ); tableau.setLowerBound( 0, 4 ); tableau.setUpperBound( 0, 6 ); @@ -2984,9 +3542,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // However, the weighted sum variable has been eliminated nlr.eliminateVariable( 2, -3 ); - // Invoke initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); /* Input ranges: @@ -3063,8 +3621,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite */ comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 0, 0, 0, 0 } ), - Vector( { 0, 0, 0, 0 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ), Vector( { -1, 1 } ), Vector( { -1, 1 } ) ); @@ -3092,10 +3650,15 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { 0, 0 } ), Vector( { -2 } ), Vector( { -2 } ) ); + + // Non-fixed activation neurons: None. + compareNonfixedNeurons( nlr, Set( {} ) ); } void test_symbolic_bound_maps_leaky_relu() { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -3106,9 +3669,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 1, -1 ); tableau.setUpperBound( 1, 1 ); - // Invoke initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); /* Input ranges: @@ -3256,15 +3819,15 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite */ comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 1, 0, 0, 1 } ), - Vector( { 0.6, 0, 0, 0.6 } ), + Vector( { 1, 1 } ), + Vector( { 0.6, 0.6 } ), Vector( { 0, 0 } ), Vector( { 0.8, 0.8 } ) ); comparePredecessorSymbolicBounds( nlr, 4, - Vector( { 1, 0, 0, 1 } ), - Vector( { 0.6667, 0, 0, 0.6 } ), + Vector( { 1, 1 } ), + Vector( { 0.6667, 0.6 } ), Vector( { 0, 0 } ), Vector( { 0.9333, 1.12 } ) ); @@ -3304,6 +3867,243 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { 0.8, -0.24, 0.72, 0.96 } ), Vector( { 1, -0.8 } ), Vector( { 4.12, 1.6 } ) ); + + // Non-fixed activation neurons: x4 (LEAKY_RELU), x5 (LEAKY_RELU), x8 (LEAKY_RELU), x9 + // (LEAKY_RELU). 
+ compareNonfixedNeurons( nlr, + Set( { NLR::NeuronIndex( 2, 0 ), + NLR::NeuronIndex( 2, 1 ), + NLR::NeuronIndex( 4, 0 ), + NLR::NeuronIndex( 4, 1 ) } ) ); + } + + void test_gradient_selection_leaky_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTLeakyReLU( nlr, tableau ); // alpha = 0.2 + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + /* + Calculating Gradient-based PMNR score of x4: + Symbolic bounds of output layer in terms of Layer 2: + 2x4 + 1 <= x10 <= 19/15 x4 + 1/15 x5 + 229/75, x4 - x5 <= x11 <= 0.6x4 - 0.6x5 + 1.12. + x4 gradient: lower = ( 2, 1 ), upper = ( 19/15, 0.6 ), average = ( 49/30, 0.8 ). + Gradient-based PMNR score of x4: ( 49/30 )^2 + ( 0.8 )^2 = 3.3078. + */ + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 3.3078 ); + + /* + Calculating Gradient-based PMNR score of x5: + Symbolic bounds of output layer in terms of Layer 2: + 2x4 + 1 <= x10 <= 19/15 x4 + 1/15 x5 + 229/75, x4 - x5 <= x11 <= 0.6x4 - 0.6x5 + 1.12. + x5 gradient: lower = ( 0, -1 ), upper = ( 1/15, -0.6 ), average = ( 1/30, -0.8 ). + Gradient-based PMNR score of x5: ( 1/30 )^2 + ( 0.8 )^2 = 0.6411. + */ + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 0.6411 ); + + /* + Calculating Gradient-based PMNR score of x8: + Symbolic bounds of output layer in terms of Layer 4: + x8 + x9 + 1 <= x10 <= x8 + x9 + 1, x9 <= x11 <= x9. + x8 gradient: lower = ( 1, 0 ), upper = ( 1, 0 ), average = ( 1, 0 ). + Gradient-based PMNR score of x8: ( 1 )^2 + ( 0 )^2 = 1. + */ + comparePMNRScores( nlr, NLR::NeuronIndex( 4, 0 ), 1 ); + + /* + Calculating Gradient-based PMNR score of x9: + Symbolic bounds of output layer in terms of Layer 4: + x8 + x9 + 1 <= x10 <= x8 + x9 + 1, x9 <= x11 <= x9. + x9 gradient: lower = ( 1, 1 ), upper = ( 1, 1 ), average = ( 1, 1 ). + Gradient-based PMNR score of x9: ( 1 )^2 + ( 1 )^2 = 2. + */ + comparePMNRScores( nlr, NLR::NeuronIndex( 4, 1 ), 2 ); + } + + void test_bbps_selection_leaky_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTLeakyReLU( nlr, tableau ); // alpha = 0.2 + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + // Using branching point (x2, 0) for x4 (LEAKY_RELU). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 2, 0 ), + std::pair( { NLR::NeuronIndex( 1, 0 ), 0 } ) ); + + // Using branching point (x3, 0) for x5 (LEAKY_RELU). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 2, 1 ), + std::pair( { NLR::NeuronIndex( 1, 1 ), 0 } ) ); + + // Using branching point (x6, 0) for x8 (LEAKY_RELU). 
+ compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 4, 0 ), + std::pair( { NLR::NeuronIndex( 3, 0 ), 0 } ) ); + + // Using branching point (x7, 0) for x9 (LEAKY_RELU). + compareBBPSBranchingPoints( + nlr, + NLR::NeuronIndex( 4, 1 ), + std::pair( { NLR::NeuronIndex( 3, 1 ), 0 } ) ); + + /* + Lower branch symbolic bounds: 0.2 x2 <= x4 <= 0.2 x2. + Upper branch symbolic bounds: x2 <= x4 <= x2. + */ + compareBranchSymbolicBounds( nlr, + NLR::NeuronIndex( 2, 0 ), + Vector( { 0.2, 1 } ), + Vector( { 0.2, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + /* + Lower branch symbolic bounds: 0.2 x3 <= x5 <= 0.2 x3. + Upper branch symbolic bounds: x3 <= x5 <= x3. + */ + compareBranchSymbolicBounds( nlr, + NLR::NeuronIndex( 2, 1 ), + Vector( { 0.2, 1 } ), + Vector( { 0.2, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + /* + Lower branch symbolic bounds: 0.2 x6 <= x8 <= 0.2 x6. + Upper branch symbolic bounds: x6 <= x8 <= x6. + */ + compareBranchSymbolicBounds( nlr, + NLR::NeuronIndex( 4, 0 ), + Vector( { 0.2, 1 } ), + Vector( { 0.2, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + /* + Lower branch symbolic bounds: 0.2 x7 <= x9 <= 0.2 x7. + Upper branch symbolic bounds: x7 <= x9 <= x7. + */ + compareBranchSymbolicBounds( nlr, + NLR::NeuronIndex( 4, 1 ), + Vector( { 0.2, 1 } ), + Vector( { 0.2, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + + /* Calculating BBPS-based PMNR score of x4: + Symbolic bounds of output layer in terms of Layer 2: + 2x4 + 1 <= x10 <= 19/15 x4 + 1/15 x5 + 229/75, x4 - x5 <= x11 <= 0.6x4 - 0.6x5 + 1.12. + Concretizing x5: 2x4 + 1 <= x10 <= 19/15 x4 + 239/75, x4 - 2 <= x11 <= 0.6x4 + 2.32. + + Lower branch, using x2: [-2, 0], 0.2 x2 <= x4 <= 0.2 x2: Output symbolic bounds: + 0.4 x2 + 1 <= x10 <= 19/75 x2 + 239/75, 0.2 x2 - 2 <= x11 <= 0.12 x2 + 2.32. + Concrete bounds: x10: [0.2, 239/75], x11: [-2.4, 2.32]. + DeepPoly bounds: x10: [-3, 5.64], x11: [-2.8, 2.8]. + Improvement: 424/75 + 0.88 = 490/75. + + Upper branch, using x6: [0, 2], x2 <= x4 <= x2: Output symbolic bounds: + 2x2 + 1 <= x10 <= 19/15 x2 + 239/75, x2 - 2 <= x11 <= 0.6x2 + 2.32. + Concrete bounds: x10: [1, 5.72], x11: [-2, 3.52]. + DeepPoly bounds: x10: [-3, 5.64], x11: [-2.8, 2.8]. + Improvement: 4 + 0.8 = 4.8. + + Final score = ( 490/75 + 4.8 ) / 2 = 17/3 = 5.6667. + */ + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 5.6667 ); + + /* Calculating BBPS-based PMNR score of x5: + Symbolic bounds of output layer in terms of Layer 2: + 2x4 + 1 <= x10 <= 19/15 x4 + 1/15 x5 + 229/75, x4 - x5 <= x11 <= 0.6x4 - 0.6x5 + 1.12. + Concretizing x4: -3 <= x10 <= 1/15 x5 + 419/75, -x5 - 2 <= x11 <= -0.6x5 + 2.32. + + Lower branch, using x3: [-2, 0], 0.2 x3 <= x5 <= 0.2 x3: Output symbolic bounds: + -3 <= x10 <= 1/75 x3 + 419/75, -0.2 x3 - 2 <= x11 <= -0.12 x3 + 2.32. + Concrete bounds: x10: [-3, 419/75], x11: [-2, 2.56]. + DeepPoly bounds: x10: [-3, 5.64], x11: [-2.8, 2.8]. + Improvement: 4/75 + 1.04 = 82/75. + + Upper branch, using x3: [0, 2], x3 <= x5 <= x3: Output symbolic bounds: + -3 <= x10 <= 1/15 x3 + 419/75, -x3 - 2 <= x11 <= -0.6x3 + 2.32. + Concrete bounds: x10: [-3, 5.72], x11: [-4, 2.32]. + DeepPoly bounds: x10: [-3, 5.64], x11: [-2.8, 2.8]. + Improvement: 0 + 0.48 = 0.48. + + Final score = ( 82/75 + 0.48 ) / 2 = 59/75 = 0.7867. + */ + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 0.7867 ); + + /* Calculating BBPS-based PMNR score of x8: + Symbolic bounds of output layer in terms of Layer 4: + x8 + x9 + 1 <= x10 <= x8 + x9 + 1, x9 <= x11 <= x9. 
+ Concretizing x9: x8 - 1.8 <= x10 <= x8 + 3.8, -2.8 <= x11 <= 2.8. + + Lower branch, using x6: [-2, 0], 0.2 x6 <= x8 <= 0.2 x6: Output symbolic bounds: + 0.2 x6 - 1.8 <= x10 <= 0.2 x6 + 3.8, -2.8 <= x11 <= 2.8. + Concrete bounds: x10: [-2.2, 3.8], x11: [-2.8, 2.8]. + DeepPoly bounds: x10: [-3, 5.64], x11: [-2.8, 2.8]. + Improvement: 2.64 + 2.8 = 2.64. + + Lower branch, using x6: [0, 2.8], x6 <= x8 <= x6: Output symbolic bounds: + x6 - 1.8 <= x10 <= x6 + 3.8, -2.8 <= x11 <= 2.8. + Concrete bounds: x10: [-1.8, 6.6], x11: [-2.8, 2.8]. + DeepPoly bounds: x10: [-3, 5.64], x11: [-2.8, 2.8]. + Improvement: 1.2 + 0 = 1.2. + + Final score = ( 2.64 + 1.2 ) / 2 = 1.92. + */ + comparePMNRScores( nlr, NLR::NeuronIndex( 4, 0 ), 1.92 ); + + /* Calculating BBPS-based PMNR score of x9: + Symbolic bounds of output layer in terms of Layer 4: + x8 + x9 + 1 <= x10 <= x8 + x9 + 1, x9 <= x11 <= x9. + Concretizing x8: x9 - 1 <= x10 <= x9 + 3.8, x9 <= x11 <= x9. + + Lower branch, using x7: [-2.8, 0], 0.2 x7 <= x9 <= 0.2 x7: Output symbolic bounds: + 0.2 x7 - 1 <= x10 <= 0.2 x7 + 3.8, 0.2 x7 <= x11 <= 0.2 x7. + Concrete bounds: x10: [-1.56, 3.8], x11: [-0.56, 0]. + DeepPoly bounds: x10: [-3, 5.64], x11: [-2.8, 2.8]. + Improvement: 3.28 + 5.04 = 8.32. + + Lower branch, using x7: [0, 2.8], x7 <= x9 <= x7: Output symbolic bounds: + x7 - 1 <= x10 <= x7 + 3.8, x7 <= x11 <= x7. + Concrete bounds: x10: [-1, 6.6], x11: [0, 2.8]. + DeepPoly bounds: x10: [-3, 5.64], x11: [-2.8, 2.8]. + Improvement: 2 + 2.8 = 4.8. + + Final score = ( 8.32 + 4.8 ) / 2 = 6.56. + */ + comparePMNRScores( nlr, NLR::NeuronIndex( 4, 1 ), 6.56 ); } void test_symbolic_bound_maps_sigmoids_and_round() @@ -3320,9 +4120,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 1, -1 ); tableau.setUpperBound( 1, 1 ); - // Invoke initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); @@ -3339,9 +4139,274 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 1 ), 0.1192, 0.0001 ) ); TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 1 ), 0.8807, 0.0001 ) ); - // Layer 3 + // Layer 3 + /* + Double-check with Python + --- + from math import exp as e + def g(x): + return 1 / (1 + e(-x)) + + def g_prime(x): + return g(x) * (1 - g(x)) + + def lam(l, u): + return (g(u) - g(l)) / (u - l) + + def lam_prime(l, u): + return min(g_prime(l), g_prime(u)) + + l3 = l4 = -2 + u3 = u4 = 2 + l5 = l6 = g(-2) + u5 = u6 = g(2) + lambda7 = lam(l3, u3) + lambda7_prime = lam_prime(l3, u3) + lambda8 = lam(l4, u4) + lambda8_prime = lam_prime(l4, u4) + x7_l = lambda7_prime * (-2) + g(-2) + g(-2) - lambda7_prime * (-2 + -2) + x7_u = lambda7_prime * (2) + g(2) + g(2) -lambda7_prime * (2 + 2) + x8_l = lambda8_prime * (-2) + g(-2) - g(2) - lambda8_prime * (-2 - 2) + x8_u = lambda8_prime * (2) + g(2) - g(-2) -lambda8_prime * (2 - -2) + print(x7_l) + print(x7_u) + print(x8_l) + print(x8_u) + + ''' + Sigmoid linear relaxation ( Layer 2 ): + x4 >= lambda7_prime * x2 + ( g(l3) - lambda7_prime * l3 ) + x4 <= lambda7_prime * x2 + ( g(u3) - lambda7_prime * u3 ) + x5 >= lambda8_prime * x3 + ( g(l4) - lambda8_prime * l4 ) + x5 <= lambda8_prime * x3 + ( g(u4) - lambda8_prime * u4 ) + ''' + 
print('------------------') + print(lambda7_prime) + print(lambda8_prime) + print(g(l3) - lambda7_prime * l3) + print(g(u3) - lambda7_prime * u3) + print(g(l4) - lambda8_prime * l4) + print(g(u4) - lambda8_prime * u4) + + --- + [output]: + 0.4483930148512481 + 1.5516069851487517 + -0.5516069851487517 + 0.5516069851487517 + ------------------ + 0.1049935854035065 + 0.1049935854035065 + 0.3291900928291306 + 0.6708099071708693 + 0.3291900928291306 + 0.6708099071708693 + */ + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 0 ), 0.4483, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 0 ), 1.5516, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 1 ), -0.5516, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 1 ), 0.5516, 0.0001 ) ); + + // Layer 4 + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 0 ), 0 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 0 ), 2 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 1 ), -1 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 1 ), 1 ); + + /* + Symbolic bounds of every activation layer in terms of predecessor: + + Layer 2 (SIGMOID): + 0.1050 x2 + 0.3292 <= x4 <= 0.1050 x2 + 0.6708 + 0.1050 x3 + 0.3292 <= x5 <= 0.1050 x3 + 0.6708 + + Layer 4 (ROUND): + x6 - 0.5 <= x8 <= x6 + 0.5 + x7 - 0.5 <= x9 <= x7 + 0.5 + + Symbolic bounds of output layer in terms of every layer (backsubstitution): + + Layer 4: + x8 <= x8 <= x8 + x9 <= x9 <= x9 + + Layer 3: + Using x6 - 0.5 <= x8 <= x6 + 0.5, x7 - 0.5 <= x9 <= x7 + 0.5: + x6 - 0.5 <= x8 <= x6 + 0.5 + x7 - 0.5 <= x9 <= x7 + 0.5 + + Layer 2: + Using x6 = x4 + x5, x7 = x4 - x5: + x4 + x5 - 0.5 <= x8 <= x4 + x5 + 0.5 + x4 - x5 - 0.5 <= x9 <= x4 - x5 + 0.5 + + Layer 1: + Using + 0.1050 x2 + 0.3292 <= x4 <= 0.1050 x2 + 0.6708, + 0.1050 x3 + 0.3292 <= x5 <= 0.1050 x3 + 0.6708: + 0.1050 x2 + 0.1050 x3 + 0.1584 <= x8 <= 0.1050 x2 + 0.1050 x3 + 1.8416 + 0.1050 x2 - 0.1050 x3 - 0.8416 <= x9 <= 0.1050 x2 - 0.1050 x3 + 0.8516 + + Layer 0: + Using x2 = x0 + x1, x3 = x0 - x1: + 0.2100 x0 + 0.1584 <= x8 <= 0.2100 x0 + 1.8416 + 0.2100 x1 - 0.8416 <= x9 <= 0.2100 x1 + 0.8516 + */ + comparePredecessorSymbolicBounds( nlr, + 2, + Vector( { 0.1050, 0.1050 } ), + Vector( { 0.1050, 0.1050 } ), + Vector( { 0.3292, 0.3292 } ), + Vector( { 0.6708, 0.6708 } ) ); + comparePredecessorSymbolicBounds( nlr, + 4, + Vector( { 1, 1 } ), + Vector( { 1, 1 } ), + Vector( { -0.5, -0.5 } ), + Vector( { 0.5, 0.5 } ) ); + + compareOutputSymbolicBounds( nlr, + 4, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ) ); + compareOutputSymbolicBounds( nlr, + 3, + Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 0, 0, 1 } ), + Vector( { -0.5, -0.5 } ), + Vector( { 0.5, 0.5 } ) ); + compareOutputSymbolicBounds( nlr, + 2, + Vector( { 1, 1, 1, -1 } ), + Vector( { 1, 1, 1, -1 } ), + Vector( { -0.5, -0.5 } ), + Vector( { 0.5, 0.5 } ) ); + compareOutputSymbolicBounds( nlr, + 1, + Vector( { 0.1050, 0.1050, 0.1050, -0.1050 } ), + Vector( { 0.1050, 0.1050, 0.1050, -0.1050 } ), + Vector( { 0.1584, -0.8416 } ), + Vector( { 1.8416, 0.8416 } ) ); + compareOutputSymbolicBounds( nlr, + 0, + Vector( { 0.2100, 0, 0, 0.2100 } ), + Vector( { 0.2100, 0, 0, 0.2100 } ), + Vector( { 0.1584, -0.8416 } ), + Vector( { 1.8416, 0.8416 } ) ); + + // Non-fixed activation neurons: x4 (SIGMOID), x5 (SIGMOID), x8 (ROUND), x9 (ROUND). 
+        compareNonfixedNeurons( nlr,
+                                Set( { NLR::NeuronIndex( 2, 0 ),
+                                       NLR::NeuronIndex( 2, 1 ),
+                                       NLR::NeuronIndex( 4, 0 ),
+                                       NLR::NeuronIndex( 4, 1 ) } ) );
+    }
+
+    void test_gradient_selection_sigmoids_and_round()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+                                   "backward-pmnr-gradient" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTSigmoidsAndRound( nlr, tableau );
+
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 1 );
+
+        // Invoke Parameterised DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) );
+
+        /*
+          Calculating Gradient-based PMNR score of x4:
+          Symbolic bounds of output layer in terms of Layer 2:
+          x4 + x5 - 0.5 <= x8 <= x4 + x5 + 0.5, x4 - x5 - 0.5 <= x9 <= x4 - x5 + 0.5.
+          x4 gradient: lower = ( 1, 1 ), upper = ( 1, 1 ), average = ( 1, 1 ).
+          Gradient-based PMNR score of x4: ( 1 )^2 + ( 1 )^2 = 2.
+        */
+        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 2 );
+
+        /*
+          Calculating Gradient-based PMNR score of x5:
+          Symbolic bounds of output layer in terms of Layer 2:
+          x4 + x5 - 0.5 <= x8 <= x4 + x5 + 0.5, x4 - x5 - 0.5 <= x9 <= x4 - x5 + 0.5.
+          x5 gradient: lower = ( 1, -1 ), upper = ( 1, -1 ), average = ( 1, -1 ).
+          Gradient-based PMNR score of x5: ( 1 )^2 + ( -1 )^2 = 2.
+        */
+        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 2 );
+
+        /*
+          Calculating Gradient-based PMNR score of x8:
+          Symbolic bounds of output layer in terms of Layer 4:
+          x8 <= x8 <= x8, x9 <= x9 <= x9.
+          x8 gradient: lower = ( 1, 0 ), upper = ( 1, 0 ), average = ( 1, 0 ).
+          Gradient-based PMNR score of x8: ( 1 )^2 + ( 0 )^2 = 1.
+        */
+        comparePMNRScores( nlr, NLR::NeuronIndex( 4, 0 ), 1 );
+
+        /*
+          Calculating Gradient-based PMNR score of x9:
+          Symbolic bounds of output layer in terms of Layer 4:
+          x8 <= x8 <= x8, x9 <= x9 <= x9.
+          x9 gradient: lower = ( 0, 1 ), upper = ( 0, 1 ), average = ( 0, 1 ).
+          Gradient-based PMNR score of x9: ( 0 )^2 + ( 1 )^2 = 1.
+        */
+        comparePMNRScores( nlr, NLR::NeuronIndex( 4, 1 ), 1 );
+    }
+
+    void test_bbps_selection_sigmoids_and_round()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+                                   "backward-pmnr-bbps" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTSigmoidsAndRound( nlr, tableau );
+
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 1 );
+
+        // Invoke Parameterised DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) );
+
+        // Using branching point (x2, -2/11) for x4 (SIGMOID).
+        compareBBPSBranchingPoints(
+            nlr,
+            NLR::NeuronIndex( 2, 0 ),
+            std::pair( { NLR::NeuronIndex( 1, 0 ), -0.1818 } ) );
+
+        // Using branching point (x3, -2/11) for x5 (SIGMOID).
+        compareBBPSBranchingPoints(
+            nlr,
+            NLR::NeuronIndex( 2, 1 ),
+            std::pair( { NLR::NeuronIndex( 1, 1 ), -0.1818 } ) );
+
+        // Using branching point (x6, 0.5) for x8 (ROUND).
+        compareBBPSBranchingPoints(
+            nlr,
+            NLR::NeuronIndex( 4, 0 ),
+            std::pair( { NLR::NeuronIndex( 3, 0 ), 0.5 } ) );
+
+        // Using branching point (x7, -0.5) for x9 (ROUND).
+        compareBBPSBranchingPoints(
+            nlr,
+            NLR::NeuronIndex( 4, 1 ),
+            std::pair( { NLR::NeuronIndex( 3, 1 ), -0.5 } ) );
+
         /*
-            Double-check with Python
+            Double-check with Python
             ---
             from math import exp as e
             def g(x):
@@ -3357,28 +4422,45 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
                 return min(g_prime(l), g_prime(u))
 
             l3 = l4 = -2
-            u3 = u4 = 2
+            u3 = u4 = -2/11
             l5 = l6 = g(-2)
+            u5 = u6 = g(-2/11)
+            lambda7 = lam(l3, u3)
+            lambda7_prime = lam_prime(l3, u3)
+            lambda8 = lam(l4, u4)
+            lambda8_prime = lam_prime(l4, u4)
+
+            '''
+            Sigmoid linear relaxation ( Layer 2 ), lower branches ( x2: [-2, -2/11], x3: [-2, -2/11]
+            ): x4 >= lambda7_prime * x2 + ( g(l3) - lambda7_prime * l3 ) x4 <= lambda7 * x2 + ( g(u3)
+            - lambda7 * u3 ) x5 >= lambda8_prime * x3 + ( g(l4) - lambda8_prime * l4 ) x5 <= lambda8
+            * x3 + ( g(u4) - lambda8 * u4 )
+            '''
+            print('------------------')
+            print(lambda7_prime)
+            print(lambda7)
+            print(lambda8_prime)
+            print(lambda8)
+            print(g(l3) - lambda7_prime * l3)
+            print(g(u3) - lambda7 * u3)
+            print(g(l4) - lambda8_prime * l4)
+            print(g(u4) - lambda8 * u4)
+
+            l3 = l4 = -2/11
+            u3 = u4 = 2
+            l5 = l6 = g(-2/11)
             u5 = u6 = g(2)
             lambda7 = lam(l3, u3)
             lambda7_prime = lam_prime(l3, u3)
             lambda8 = lam(l4, u4)
             lambda8_prime = lam_prime(l4, u4)
-            x7_l = lambda7_prime * (-2) + g(-2) + g(-2) - lambda7_prime * (-2 + -2)
-            x7_u = lambda7_prime * (2) + g(2) + g(2) -lambda7_prime * (2 + 2)
-            x8_l = lambda8_prime * (-2) + g(-2) - g(2) - lambda8_prime * (-2 - 2)
-            x8_u = lambda8_prime * (2) + g(2) - g(-2) -lambda8_prime * (2 - -2)
-            print(x7_l)
-            print(x7_u)
-            print(x8_l)
-            print(x8_u)
 
             '''
-            Sigmoid linear relaxation ( Layer 2 ):
+            Sigmoid linear relaxation ( Layer 2 ), upper branches ( x2: [-2/11, 2], x3: [-2/11, 2] ):
             x4 >= lambda7_prime * x2 + ( g(l3) - lambda7_prime * l3 )
             x4 <= lambda7_prime * x2 + ( g(u3) - lambda7_prime * u3 )
             x5 >= lambda8_prime * x3 + ( g(l4) - lambda8_prime * l4 )
-            x5 <= lambda8_prime * x3 + ( g(u4) - lambda7_prime * u4 )
+            x5 <= lambda8_prime * x3 + ( g(u4) - lambda8_prime * u4 )
             '''
@@ -3388,114 +4470,155 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
             print(g(l4) - lambda8_prime * l4)
             print(g(u4) - lambda8_prime * u4)
 
-            ---
             [output]:
-            0.4483930148512481
-            1.5516069851487517
-            -0.5516069851487517
-            0.5516069851487517
             ------------------
             0.1049935854035065
+            0.18450703649914935
             0.1049935854035065
+            0.18450703649914935
             0.3291900928291306
-            0.6708099071708693
+            0.4882169950204162
             0.3291900928291306
-            0.6708099071708693
+            0.4882169950204162
+            ------------------
+            0.10499358540350662
+            0.10499358540350662
+            0.47376000391211753
+            0.6708099071708691
+            0.47376000391211753
+            0.6708099071708691
+
+            Lower branch symbolic bounds: 0.1050 x2 + 0.3292 <= x4 <= 0.1845 x2 + 0.4882.
+            Upper branch symbolic bounds: 0.1050 x2 + 0.4737 <= x4 <= 0.1050 x2 + 0.6708.
+
+            Lower branch symbolic bounds: 0.1050 x3 + 0.3292 <= x5 <= 0.1845 x3 + 0.4882.
+            Upper branch symbolic bounds: 0.1050 x3 + 0.4737 <= x5 <= 0.1050 x3 + 0.6708.
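+
+            A further quick check of ours (an illustrative sketch in plain Python 3,
+            reusing the g, lam and lam_prime helpers above; the 4-decimal branch
+            constants asserted below are simply these values rounded):
+            ---
+            bp = -2/11
+            print(round(lam(-2, bp), 4))                    # 0.1845
+            print(round(g(bp) - lam(-2, bp) * bp, 4))       # 0.4882
+            print(round(lam_prime(bp, 2), 4))               # 0.1050
+            print(round(g(bp) - lam_prime(bp, 2) * bp, 4))  # 0.4737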
         */
-        TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 0 ), 0.4483, 0.0001 ) );
-        TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 0 ), 1.5516, 0.0001 ) );
-        TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 1 ), -0.5516, 0.0001 ) );
-        TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 1 ), 0.5516, 0.0001 ) );
+        compareBranchSymbolicBounds( nlr,
+                                     NLR::NeuronIndex( 2, 0 ),
+                                     Vector( { 0.1050, 0.1050 } ),
+                                     Vector( { 0.1845, 0.1050 } ),
+                                     Vector( { 0.3292, 0.4737 } ),
+                                     Vector( { 0.4882, 0.6708 } ) );
+        compareBranchSymbolicBounds( nlr,
+                                     NLR::NeuronIndex( 2, 1 ),
+                                     Vector( { 0.1050, 0.1050 } ),
+                                     Vector( { 0.1845, 0.1050 } ),
+                                     Vector( { 0.3292, 0.4737 } ),
+                                     Vector( { 0.4882, 0.6708 } ) );
 
-        // Layer 4
-        TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 0 ), 0 );
-        TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 0 ), 2 );
-        TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 1 ), -1 );
-        TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 1 ), 1 );
+        /*
+          Lower branch symbolic bounds: 0 <= x8 <= 0.
+          Upper branch symbolic bounds: x6 - 0.5 <= x8 <= x6 + 0.5.
+        */
+        compareBranchSymbolicBounds( nlr,
+                                     NLR::NeuronIndex( 4, 0 ),
+                                     Vector( { 0, 1 } ),
+                                     Vector( { 0, 1 } ),
+                                     Vector( { 0, -0.5 } ),
+                                     Vector( { 0, 0.5 } ) );
 
         /*
-           Symbolic bounds of every activation layer in terms of predecessor:
+          Lower branch symbolic bounds: -1 <= x9 <= -1.
+          Upper branch symbolic bounds: x7 - 0.5 <= x9 <= x7 + 0.5.
+        */
+        compareBranchSymbolicBounds( nlr,
+                                     NLR::NeuronIndex( 4, 1 ),
+                                     Vector( { 0, 1 } ),
+                                     Vector( { 0, 1 } ),
+                                     Vector( { -1, -0.5 } ),
+                                     Vector( { -1, 0.5 } ) );
+
+        /* Calculating BBPS-based PMNR score of x4:
+           Symbolic bounds of output layer in terms of Layer 2:
+           x4 + x5 - 0.5 <= x8 <= x4 + x5 + 0.5, x4 - x5 - 0.5 <= x9 <= x4 - x5 + 0.5.
+           Concretizing x5: x4 - 0.3808 <= x8 <= x4 + 1.3808, x4 - 1.3808 <= x9 <= x4 + 0.3808.
+
+           Lower branch, using x2: [-2, -2/11], 0.1050 x2 + 0.3292 <= x4 <= 0.1845 x2 + 0.4882:
+           Output symbolic bounds:
+           0.1050 x2 - 0.0516 <= x8 <= 0.1845 x2 + 1.8690,
+           0.1050 x2 - 1.0516 <= x9 <= 0.1845 x2 + 0.8690.
+           Concrete bounds: x8: [-0.2616, 1.8355], x9: [-1.2616, 0.8355].
+           DeepPoly bounds: x8: [0, 2], x9: [-1, 1].
+           Improvement: 0.1645 + 0.1645 = 0.3290.
+
+           Upper branch, using x2: [-2/11, 2], 0.1050 x2 + 0.4737 <= x4 <= 0.1050 x2 + 0.6708:
+           Output symbolic bounds:
+           0.1050 x2 + 0.0929 <= x8 <= 0.1050 x2 + 2.0516,
+           0.1050 x2 - 0.9071 <= x9 <= 0.1050 x2 + 1.0516.
+           Concrete bounds: x8: [0.0738, 2.2616], x9: [-0.9262, 1.2616].
+           DeepPoly bounds: x8: [0, 2], x9: [-1, 1].
+           Improvement: 0.0738 + 0.0738 = 0.1476.
+
+           Final score = ( 0.3290 + 0.1476 ) / 2 = 0.2384.
+        */
+        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 0.2384 );
+
+        /* Calculating BBPS-based PMNR score of x5:
+           Symbolic bounds of output layer in terms of Layer 2:
+           x4 + x5 - 0.5 <= x8 <= x4 + x5 + 0.5, x4 - x5 - 0.5 <= x9 <= x4 - x5 + 0.5.
+           Concretizing x4: x5 - 0.3808 <= x8 <= x5 + 1.3808, -x5 - 0.3808 <= x9 <= -x5 + 1.3808.
+
+           Lower branch, using x3: [-2, -2/11], 0.1050 x3 + 0.3292 <= x5 <= 0.1845 x3 + 0.4882:
+           Output symbolic bounds:
+           0.1050 x3 - 0.0516 <= x8 <= 0.1845 x3 + 1.8690,
+           -0.1845 x3 - 0.8690 <= x9 <= -0.1050 x3 + 1.0516.
+           Concrete bounds: x8: [-0.2616, 1.8355], x9: [-0.8355, 1.2616].
+           DeepPoly bounds: x8: [0, 2], x9: [-1, 1].
+           Improvement: 0.1645 + 0.1645 = 0.3290.
+
+           Upper branch, using x3: [-2/11, 2], 0.1050 x3 + 0.4737 <= x5 <= 0.1050 x3 + 0.6708:
+           Output symbolic bounds:
+           0.1050 x3 + 0.0929 <= x8 <= 0.1050 x3 + 2.0516,
+           -0.1050 x3 - 1.0516 <= x9 <= -0.1050 x3 + 0.9071.
+           Concrete bounds: x8: [0.0738, 2.2616], x9: [-1.1171, 0.9262].
+           DeepPoly bounds: x8: [0, 2], x9: [-1, 1].
+           Improvement: 0.0738 + 0.0738 = 0.1476.
+
+           Final score = ( 0.3290 + 0.1476 ) / 2 = 0.2384.
+        */
+        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 0.2384 );
-           Layer 2 (SIGMOID):
-           0.1050 x2 + 0.3292 <= x4 <= 0.1050 x2 + 0.6708
-           0.1050 x3 + 0.3292 <= x5 <= 0.1050 x3 + 0.6708
+
+        /* Calculating BBPS-based PMNR score of x8:
+           Symbolic bounds of output layer in terms of Layer 4: x8 <= x8 <= x8, x9 <= x9 <= x9.
+           Concretizing x9: x8 <= x8 <= x8, -1 <= x9 <= 1.
+
-           Layer 4 (ROUND):
-           x6 - 0.5 <= x8 <= x6 + 0.5
-           x7 - 0.5 <= x9 <= x7 + 0.5
+           Lower branch, using x6: [0.4483, 0.5], 0 <= x8 <= 0: Output symbolic bounds:
+           0 <= x8 <= 0, -1 <= x9 <= 1.
+           Concrete bounds: x8: [0, 0], x9: [-1, 1].
+           DeepPoly bounds: x8: [0, 2], x9: [-1, 1].
+           Improvement: 2.
+
-           Symbolic bounds of output layer in terms of every layer (backsubstitution):
+           Upper branch, using x6: [0.5, 1.5516], x6 - 0.5 <= x8 <= x6 + 0.5:
+           Output symbolic bounds: x6 - 0.5 <= x8 <= x6 + 0.5, -1 <= x9 <= 1.
+           Concrete bounds: x8: [0, 2.0516], x9: [-1, 1].
+           DeepPoly bounds: x8: [0, 2], x9: [-1, 1].
+           Improvement: 0.
+
-           Layer 4:
-           x8 <= x8 <= x8
-           x9 <= x9 <= x9
+           Final score = ( 2 + 0 ) / 2 = 1.
+        */
+        comparePMNRScores( nlr, NLR::NeuronIndex( 4, 0 ), 1 );
-           Layer 3:
-           Using x6 - 0.5 <= x8 <= x6 + 0.5, x7 - 0.5 <= x9 <= x7 + 0.5:
-           x6 - 0.5 <= x8 <= x6 + 0.5
-           x7 - 0.5 <= x9 <= x7 + 0.5
+
+        /* Calculating BBPS-based PMNR score of x9:
+           Symbolic bounds of output layer in terms of Layer 4: x8 <= x8 <= x8, x9 <= x9 <= x9.
+           Concretizing x8: 0 <= x8 <= 2, x9 <= x9 <= x9.
+
-           Layer 2:
-           Using x6 = x4 + x5, x7 = x4 - x5:
-           x4 + x5 - 0.5 <= x8 <= x4 + x5 + 0.5
-           x4 - x5 - 0.5 <= x9 <= x4 - x5 + 0.5
+           Lower branch, using x7: [-0.5516, -0.5], -1 <= x9 <= -1:
+           Output symbolic bounds: 0 <= x8 <= 2, -1 <= x9 <= -1.
+           Concrete bounds: x8: [0, 2], x9: [-1, -1].
+           DeepPoly bounds: x8: [0, 2], x9: [-1, 1].
+           Improvement: 2.
+
-           Layer 1:
-           Using
-           0.1050 x2 + 0.3292 <= x4 <= 0.1050 x2 + 0.6708,
-           0.1050 x3 + 0.3292 <= x5 <= 0.1050 x3 + 0.6708:
-           0.1050 x2 + 0.1050 x3 + 0.1584 <= x8 <= 0.1050 x2 + 0.1050 x3 + 1.8416
-           0.1050 x2 - 0.1050 x3 - 0.8416 <= x9 <= 0.1050 x2 - 0.1050 x3 + 0.8516
+           Upper branch, using x7: [-0.5, 0.5516], x7 - 0.5 <= x9 <= x7 + 0.5:
+           Output symbolic bounds: 0 <= x8 <= 2, x7 - 0.5 <= x9 <= x7 + 0.5.
+           Concrete bounds: x8: [0, 2], x9: [-1, 1.0516].
+           DeepPoly bounds: x8: [0, 2], x9: [-1, 1].
+           Improvement: 0.
+
-           Layer 0:
-           Using x2 = x0 + x1, x3 = x0 - x1:
-           0.2100 x0 + 0.1584 <= x8 <= 0.2100 x0 + 1.8416
-           0.2100 x1 - 0.8416 <= x9 <= 0.2100 x1 + 0.8516
+           Final score = ( 2 + 0 ) / 2 = 1.
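+
+           Double-check of the branch averaging with Python (an illustrative
+           sketch of ours, not the production scoring code; boxes are (lb, ub)):
+           ---
+           def improvement(concrete, deeppoly):
+               return sum(max(0.0, c[0] - d[0]) + max(0.0, d[1] - c[1])
+                          for c, d in zip(concrete, deeppoly))
+
+           dp = [(0.0, 2.0), (-1.0, 1.0)]                      # DeepPoly boxes of x8, x9
+           low = improvement([(0.0, 2.0), (-1.0, -1.0)], dp)   # lower branch: 2
+           up = improvement([(0.0, 2.0), (-1.0, 1.0516)], dp)  # upper branch: 0
+           print((low + up) / 2)                               # 1.0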
*/ - comparePredecessorSymbolicBounds( nlr, - 2, - Vector( { 0.1050, 0, 0, 0.1050 } ), - Vector( { 0.1050, 0, 0, 0.1050 } ), - Vector( { 0.3292, 0.3292 } ), - Vector( { 0.6708, 0.6708 } ) ); - comparePredecessorSymbolicBounds( nlr, - 4, - Vector( { 1, 0, 0, 1 } ), - Vector( { 1, 0, 0, 1 } ), - Vector( { -0.5, -0.5 } ), - Vector( { 0.5, 0.5 } ) ); - - compareOutputSymbolicBounds( nlr, - 4, - Vector( { 1, 0, 0, 1 } ), - Vector( { 1, 0, 0, 1 } ), - Vector( { 0, 0 } ), - Vector( { 0, 0 } ) ); - compareOutputSymbolicBounds( nlr, - 3, - Vector( { 1, 0, 0, 1 } ), - Vector( { 1, 0, 0, 1 } ), - Vector( { -0.5, -0.5 } ), - Vector( { 0.5, 0.5 } ) ); - compareOutputSymbolicBounds( nlr, - 2, - Vector( { 1, 1, 1, -1 } ), - Vector( { 1, 1, 1, -1 } ), - Vector( { -0.5, -0.5 } ), - Vector( { 0.5, 0.5 } ) ); - compareOutputSymbolicBounds( nlr, - 1, - Vector( { 0.1050, 0.1050, 0.1050, -0.1050 } ), - Vector( { 0.1050, 0.1050, 0.1050, -0.1050 } ), - Vector( { 0.1584, -0.8416 } ), - Vector( { 1.8416, 0.8416 } ) ); - compareOutputSymbolicBounds( nlr, - 0, - Vector( { 0.2100, 0, 0, 0.2100 } ), - Vector( { 0.2100, 0, 0, 0.2100 } ), - Vector( { 0.1584, -0.8416 } ), - Vector( { 1.8416, 0.8416 } ) ); + comparePMNRScores( nlr, NLR::NeuronIndex( 4, 1 ), 1 ); } void test_symbolic_bound_maps_max_not_fixed() @@ -3512,9 +4635,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 1, -1 ); tableau.setUpperBound( 1, 2 ); - // Invoke initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); /* Input ranges: @@ -3617,8 +4740,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite */ comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 1, 0, 0, 0 } ), - Vector( { 0.6, 0, 0, 0.4 } ), + Vector( { 1, 0 } ), + Vector( { 0.6, 0.4 } ), Vector( { 0, 0 } ), Vector( { 1.2, 1.2 } ) ); comparePredecessorSymbolicBounds( nlr, @@ -3658,6 +4781,186 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { 0, 0 } ), Vector( { 0 } ), Vector( { 6 } ) ); + + // Non-fixed activation neurons: x4 (RELU), x5 (RELU), x6 (MAX). + compareNonfixedNeurons( nlr, + Set( { NLR::NeuronIndex( 2, 0 ), + NLR::NeuronIndex( 2, 1 ), + NLR::NeuronIndex( 3, 0 ) } ) ); + } + + void test_gradient_selection_max_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTMax( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + /* + Calculating Gradient-based PMNR score of x4: + Symbolic bounds of output layer in terms of Layer 2: + 2x5 <= x7 <= 6. + x4 gradient: lower = ( 0 ), upper = ( 0 ), average = ( 0 ). + Gradient-based PMNR score of x4: ( 0 )^2 = 0. + */ + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 0 ); + + /* + Calculating Gradient-based PMNR score of x5: + Symbolic bounds of output layer in terms of Layer 2: + 2x5 <= x7 <= 6. + x5 gradient: lower = ( 2 ), upper = ( 0 ), average = ( 1 ). 
+          Gradient-based PMNR score of x5: ( 1 )^2 = 1.
+        */
+        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 1 );
+
+        /*
+          Calculating Gradient-based PMNR score of x6:
+          Symbolic bounds of output layer in terms of Layer 3:
+          2x6 <= x7 <= 2x6.
+          x6 gradient: lower = ( 2 ), upper = ( 2 ), average = ( 2 ).
+          Gradient-based PMNR score of x6: ( 2 )^2 = 4.
+        */
+        comparePMNRScores( nlr, NLR::NeuronIndex( 3, 0 ), 4 );
+    }
+
+    void test_bbps_selection_max_not_fixed()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+                                   "backward-pmnr-bbps" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTMax( nlr, tableau );
+
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 2 );
+
+        // Invoke Parameterised DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) );
+
+        // Using branching point (x2, 0) for x4 (RELU).
+        compareBBPSBranchingPoints(
+            nlr,
+            NLR::NeuronIndex( 2, 0 ),
+            std::pair( { NLR::NeuronIndex( 1, 0 ), 0 } ) );
+
+        // Using branching point (x3, 0) for x5 (RELU).
+        compareBBPSBranchingPoints(
+            nlr,
+            NLR::NeuronIndex( 2, 1 ),
+            std::pair( { NLR::NeuronIndex( 1, 1 ), 0 } ) );
+
+        // Using branching point (x5, 6/11) for x6 (MAX).
+        compareBBPSBranchingPoints(
+            nlr,
+            NLR::NeuronIndex( 3, 0 ),
+            std::pair( { NLR::NeuronIndex( 2, 1 ), 0.5455 } ) );
+
+        /*
+          Lower branch symbolic bounds: 0 <= x4 <= 0.
+          Upper branch symbolic bounds: x2 <= x4 <= x2.
+        */
+        compareBranchSymbolicBounds( nlr,
+                                     NLR::NeuronIndex( 2, 0 ),
+                                     Vector( { 0, 1 } ),
+                                     Vector( { 0, 1 } ),
+                                     Vector( { 0, 0 } ),
+                                     Vector( { 0, 0 } ) );
+
+        /*
+          Lower branch symbolic bounds: 0 <= x5 <= 0.
+          Upper branch symbolic bounds: x3 <= x5 <= x3.
+        */
+        compareBranchSymbolicBounds( nlr,
+                                     NLR::NeuronIndex( 2, 1 ),
+                                     Vector( { 0, 1 } ),
+                                     Vector( { 0, 1 } ),
+                                     Vector( { 0, 0 } ),
+                                     Vector( { 0, 0 } ) );
+
+        /*
+          Lower branch, x4: [-2, 3], x5: [0, 6/11]:
+          Max is not fixed because x5.lb <= x4.ub and x4.lb <= x5.ub
+          Max inherits lower bound from x5, and its upper bound is constant 3.
+
+          Upper branch, x4: [-2, 3], x5: [6/11, 2]:
+          Max is not fixed because x5.lb <= x4.ub and x4.lb <= x5.ub
+          Max inherits lower bound from x5, and its upper bound is constant 3.
+
+          Lower branch symbolic bounds: x5 <= x6 <= 3.
+          Upper branch symbolic bounds: x5 <= x6 <= 3.
+        */
+        compareBranchSymbolicBounds( nlr,
+                                     NLR::NeuronIndex( 3, 0 ),
+                                     Vector( { 1, 1 } ),
+                                     Vector( { 0, 0 } ),
+                                     Vector( { 0, 0 } ),
+                                     Vector( { 3, 3 } ) );
+
+        /* Calculating BBPS-based PMNR score of x4:
+           Symbolic bounds of output layer in terms of Layer 2: 2x5 <= x7 <= 6.
+           Concretizing x5: 0 <= x7 <= 6.
+
+           Lower branch, using x2: [-2, 0], 0 <= x4 <= 0:
+           Output symbolic bounds 0 <= x7 <= 6.
+           Concrete bounds: [0, 6]. DeepPoly bounds: [0, 6]. Improvement: 0.
+
+           Upper branch, using x2: [0, 3], x2 <= x4 <= x2:
+           Output symbolic bounds 0 <= x7 <= 6.
+           Concrete bounds: [0, 6]. DeepPoly bounds: [0, 6]. Improvement: 0.
+
+           Final score = ( 0 + 0 ) / 2 = 0.
+        */
+        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 0 );
+
+        /* Calculating BBPS-based PMNR score of x5:
+           Symbolic bounds of output layer in terms of Layer 2: 2x5 <= x7 <= 6.
+
+           Lower branch, using x3: [-3, 0], 0 <= x5 <= 0:
+           Output symbolic bounds 0 <= x7 <= 6.
+           Concrete bounds: [0, 6]. DeepPoly bounds: [0, 6]. Improvement: 0.
+
+           Upper branch, using x3: [0, 2], x3 <= x5 <= x3:
+           Output symbolic bounds 2x3 <= x7 <= 6.
+           Concrete bounds: [0, 6]. DeepPoly bounds: [0, 6]. Improvement: 0.
+
+           Final score = ( 0 + 0 ) / 2 = 0.
+        */
+        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 0 );
+
+        /* Calculating BBPS-based PMNR score of x6:
+           Symbolic bounds of output layer in terms of Layer 3: 2x6 <= x7 <= 2x6.
+
+           Lower branch, x5: [0, 6/11], using x5 <= x6 <= 3:
+           Output symbolic bounds 2x5 <= x7 <= 6.
+           Concrete bounds: [0, 6]. DeepPoly bounds: [0, 6]. Improvement: 0.
+
+           Upper branch, x5: [6/11, 2], using x5 <= x6 <= 3:
+           Output symbolic bounds 2x5 <= x7 <= 6.
+           Concrete bounds: [12/11, 6]. DeepPoly bounds: [0, 6]. Improvement: 12/11.
+
+           Final score = ( 0 + 12/11 ) / 2 = 6/11.
+        */
+        comparePMNRScores( nlr, NLR::NeuronIndex( 3, 0 ), 0.5455 );
     }
 
     void test_symbolic_bound_maps_max_fixed()
@@ -3674,9 +4977,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         tableau.setLowerBound( 1, -3 );
         tableau.setUpperBound( 1, -2 );
 
-        // Invoke initializeSymbolicBoundsMaps
+        // Invoke Parameterised DeepPoly
        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() );
+        TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) );
 
         /*
           Input ranges:
@@ -3772,8 +5075,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         */
         comparePredecessorSymbolicBounds( nlr,
                                           2,
-                                          Vector( { 0, 0, 0, 1 } ),
-                                          Vector( { 0, 0, 0, 1 } ),
+                                          Vector( { 0, 1 } ),
+                                          Vector( { 0, 1 } ),
                                           Vector( { 0, 0 } ),
                                           Vector( { 0, 0 } ) );
         comparePredecessorSymbolicBounds( nlr,
@@ -3813,6 +5116,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
                                      Vector( { 2, -2 } ),
                                      Vector( { 0 } ),
                                      Vector( { 0 } ) );
+
+        // Non-fixed activation neurons: None.
+ compareNonfixedNeurons( nlr, Set( {} ) ); } void test_symbolic_bound_maps_softmax1() @@ -3831,9 +5137,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 2, -1 ); tableau.setUpperBound( 2, 1 ); - // Invoke initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); } void test_symbolic_bound_maps_softmax2() @@ -3854,9 +5160,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 2, 1 ); tableau.setUpperBound( 2, 1.000001 ); - // Invoke initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); /* Input ranges: @@ -3944,17 +5250,17 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite /* Layer 2: -0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 + 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 x6.lb = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232 x6.ub = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232 x6 range: [ 0.2595, 0.2595 ] --0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4480 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 + -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4480 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 x7.lb = -0.3660 x0 - 0.4156 x1 + 0.0496 x2 + 0.6062 x7.ub = -0.3660 x0 - 0.4156 x1 + 0.0496 x2 + 0.6063 x7 range: [ 0.7054, 0.7054 ] --0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= 0.1922 x3 -0.0248 x4 + 0.0339 x5 + 0.1277 + -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 x8.lb = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707 x8.ub = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707 x8 range: [ 0.0351, 0.0351 ] @@ -4009,9 +5315,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Symbolic bounds of every activation layer in terms of predecessor: Layer 2 (SOFTMAX): -0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 --0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 --0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= 0.1922 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 + 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 + -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 + -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 Symbolic bounds of output layer in terms of every layer (backsubstitution): @@ -4026,11 +5332,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Layer 1: Using -0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243. --0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481. --0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= 0.1922 x3 -0.0248 x4 + 0.0339 x5 + 0.1277: - 1 <= x9 <= 1 - -1 <= x10 <= -1 + 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243. + -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + + 0.4481. 
-0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + + 0.1277: 1 <= x9 <= 1 -1 <= x10 <= -1 Layer 0: 1 <= x9 <= 1 @@ -4083,6 +5388,12 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { 0, 0, 0, 0, 0, 0 } ), Vector( { 1, -1 } ), Vector( { 1, -1 } ) ); + + // Non-fixed activation neurons: x6 (SOFTMAX), x7 (SOFTMAX), x8 (SOFTMAX). + compareNonfixedNeurons( nlr, + Set( { NLR::NeuronIndex( 2, 0 ), + NLR::NeuronIndex( 2, 1 ), + NLR::NeuronIndex( 2, 2 ) } ) ); } { Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "er" ); @@ -4098,9 +5409,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 2, 1 ); tableau.setUpperBound( 2, 1.000001 ); - // Invoke initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); /* Input ranges: @@ -4188,17 +5499,17 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite /* Layer 2: -0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 + 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 x6.lb = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232 x6.ub = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232 x6 range: [ 0.2595, 0.2595 ] --0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4480 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 + -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4480 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 x7.lb = -0.3660 x0 - 0.4156 x1 + 0.0496 x2 + 0.6062 x7.ub = -0.3660 x0 - 0.4156 x1 + 0.0496 x2 + 0.6063 x7 range: [ 0.7054, 0.7054 ] --0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= 0.1922 x3 -0.0248 x4 + 0.0339 x5 + 0.1277 + -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 x8.lb = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707 x8.ub = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707 x8 range: [ 0.0351, 0.0351 ] @@ -4252,9 +5563,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Symbolic bounds of every activation layer in terms of predecessor: Layer 2 (SOFTMAX): -0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 --0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 --0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= 0.1922 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 + 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 + -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 + -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 Symbolic bounds of output layer in terms of every layer (backsubstitution): @@ -4269,11 +5580,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Layer 1: Using -0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243. --0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481. --0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= 0.1922 x3 -0.0248 x4 + 0.0339 x5 + 0.1277: - 1 <= x9 <= 1 - -1 <= x10 <= -1 + 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243. 
+ -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + + 0.4481. -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + + 0.1277: 1 <= x9 <= 1 -1 <= x10 <= -1 Layer 0: 1 <= x9 <= 1 @@ -4326,9 +5636,158 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { 0, 0, 0, 0, 0, 0 } ), Vector( { 1, -1 } ), Vector( { 1, -1 } ) ); + + // Non-fixed activation neurons: x6 (SOFTMAX), x7 (SOFTMAX), x8 (SOFTMAX). + compareNonfixedNeurons( nlr, + Set( { NLR::NeuronIndex( 2, 0 ), + NLR::NeuronIndex( 2, 1 ), + NLR::NeuronIndex( 2, 2 ) } ) ); } } + void test_gradient_selection_softmax2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.000001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.000001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.000001 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + /* + Calculating Gradient-based PMNR score of x6: + Symbolic bounds of output layer in terms of Layer 2: + x6 + x7 + x8 <= x9 <= x6 + x7 + x8, + -x6 - x7 - x8 <= x10 <= -x6 - x7 - x8. + x6 gradient: lower = ( 1, -1 ), upper = ( 1, -1 ), average = ( 1, -1 ). + Gradient-based PMNR score of x6: ( 1 )^2 + ( -1 )^2 = 2. + */ + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 2 ); + + /* + Calculating Gradient-based PMNR score of x7: + Symbolic bounds of output layer in terms of Layer 2: + x6 + x7 + x8 <= x9 <= x6 + x7 + x8, + -x6 - x7 - x8 <= x10 <= -x6 - x7 - x8. + x7 gradient: lower = ( 1, -1 ), upper = ( 1, -1 ), average = ( 1, -1 ). + Gradient-based PMNR score of x7: ( 1 )^2 + ( -1 )^2 = 2. + */ + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 2 ); + + /* + Calculating Gradient-based PMNR score of x8: + Symbolic bounds of output layer in terms of Layer 3: + x6 + x7 + x8 <= x9 <= x6 + x7 + x8, + -x6 - x7 - x8 <= x10 <= -x6 - x7 - x8. + x8 gradient: lower = ( 1, -1 ), upper = ( 1, -1 ), average = ( 1, -1 ). + Gradient-based PMNR score of x8: ( 1 )^2 + ( -1 )^2 = 2. + */ + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 2 ), 2 ); + } + + void test_bbps_selection_softmax2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-bbps" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.000001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.000001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.000001 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + // Using branching point (x3, 2) for x6 (SOFTMAX). 
+        compareBBPSBranchingPoints(
+            nlr,
+            NLR::NeuronIndex( 2, 0 ),
+            std::pair( { NLR::NeuronIndex( 1, 0 ), 2 } ) );
+
+        // Using branching point (x4, 3) for x7 (SOFTMAX).
+        compareBBPSBranchingPoints(
+            nlr,
+            NLR::NeuronIndex( 2, 1 ),
+            std::pair( { NLR::NeuronIndex( 1, 1 ), 3 } ) );
+
+        // Using branching point (x5, 0) for x8 (SOFTMAX).
+        compareBBPSBranchingPoints(
+            nlr,
+            NLR::NeuronIndex( 2, 2 ),
+            std::pair( { NLR::NeuronIndex( 1, 2 ), 0 } ) );
+
+        /*
+          Symbolic bounds of x6 in terms of predecessor (for both branches, since range(x3) <
+          0.0001): 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 +
+          0.4243. Concretizing x4, x5: 0.1922 x3 - 0.1248 <= x6 <= 0.1922 x3 - 0.1248.
+
+          Symbolic bounds of x7 in terms of predecessor (for both branches, since range(x4) <
+          0.0001): -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 +
+          0.4481. Concretizing x3, x5: 0.2078 x4 + 0.0819 <= x7 <= 0.2078 x4 + 0.0819.
+
+          Symbolic bounds of x8 in terms of predecessor (for both branches, since range(x5) <
+          0.0001): -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 +
+          0.1277. Concretizing x3, x4: 0.0339 x5 + 0.0351 <= x8 <= 0.0339 x5 + 0.0351.
+        */
+        compareBranchSymbolicBounds( nlr,
+                                     NLR::NeuronIndex( 2, 0 ),
+                                     Vector( { 0.1922, 0.1922 } ),
+                                     Vector( { 0.1922, 0.1922 } ),
+                                     Vector( { -0.1248, -0.1248 } ),
+                                     Vector( { -0.1248, -0.1248 } ) );
+        compareBranchSymbolicBounds( nlr,
+                                     NLR::NeuronIndex( 2, 1 ),
+                                     Vector( { 0.2078, 0.2078 } ),
+                                     Vector( { 0.2078, 0.2078 } ),
+                                     Vector( { 0.0819, 0.0819 } ),
+                                     Vector( { 0.0819, 0.0819 } ) );
+        compareBranchSymbolicBounds( nlr,
+                                     NLR::NeuronIndex( 2, 2 ),
+                                     Vector( { 0.0339, 0.0339 } ),
+                                     Vector( { 0.0339, 0.0339 } ),
+                                     Vector( { 0.0351, 0.0351 } ),
+                                     Vector( { 0.0351, 0.0351 } ) );
+
+        /*
+          Calculating BBPS-based PMNR score of x6, x7, x8:
+          Symbolic bounds of output layer in terms of Layer 2:
+          x6 + x7 + x8 <= x9 <= x6 + x7 + x8
+          -x6 - x7 - x8 <= x10 <= -x6 - x7 - x8
+
+          Because the lower/upper symbolic bounds for output layer are equal (up to ~10^-6),
+          and lower/upper predecessor symbolic bounds for both branches are equal, the concrete
+          bounds for every output neuron, every nonfixed neuron and branch are equal to DeepPoly.
+          Consequently, the BBPS-based PMNR scores for all neurons are 0.
+        */
+        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 0 );
+        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 0 );
+        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 2 ), 0 );
+    }
+
     void test_symbolic_bound_maps_softmax3()
     {
         Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
@@ -4346,9 +5805,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         tableau.setLowerBound( 2, 1 );
         tableau.setUpperBound( 2, 1.00001 );
 
-        // Invoke initializeSymbolicBoundsMaps
+        // Invoke Parameterised DeepPoly
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() );
+        TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) );
 
         /*
           Input ranges:
@@ -4429,7 +5888,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
                                              -0.0019,
                                              0.0156 } ) ) );
         TS_ASSERT( compareVectors( symbolicUb,
-                                   Vector( { 0.1154,
+                                   Vector( { 0.1155,
                                              -0.1017,
                                              -0.0138,
                                              -0.1017,
-0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 <= x8 <= 0.1154 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 + 0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 <= x8 <= 0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 x8.lb = 0.2310 x0 + 0.0001 x1 + 0.2310 x2 + 0.4051 x8.ub = 0.2310 x0 + 0.0000 x1 + 0.2310 x2 + 0.4050 x8 range: [ 0.8668, 0.8668 ] --0.1017 x3 + 0.1035 x5 - 0.0019 x7 + 0.3170 <= x10 <= -0.1017 x3 + 0.1036 x5 - 0.0019 x7 + 0.3170 - x10.lb = -0.2033 x0 + 0.0001 x1 - 0.2033 x2 + 0.5239 - x10.ub = -0.2033 x0 + 0.0000 x1 - 0.2033 x2 + 0.5241 - x10 range: [ 0.1173, 0.1173 ] + -0.1017 x3 + 0.1035 x5 - 0.0019 x7 + 0.3170 <= x10 <= -0.1017 x3 + 0.1036 x5 - 0.0019 x7 + + 0.3170 x10.lb = -0.2033 x0 + 0.0001 x1 - 0.2033 x2 + 0.5239 x10.ub = -0.2033 x0 + 0.0000 x1 - + 0.2033 x2 + 0.5241 x10 range: [ 0.1173, 0.1173 ] --0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 <= x12 <= -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 - x12.lb = -0.0275 x0 + 0.0001 x1 - 0.0275 x2 + 0.0708 - x12.ub = -0.0275 x0 + 0.0001 x1 - 0.0275 x2 + 0.0708 - x12 range: [ 0.0159, 0.0159 ] + -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 <= x12 <= -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + + 0.0747 x12.lb = -0.0275 x0 + 0.0001 x1 - 0.0275 x2 + 0.0708 x12.ub = -0.0275 x0 + 0.0001 x1 - + 0.0275 x2 + 0.0708 x12 range: [ 0.0159, 0.0159 ] Second Sigmoid: x9 x11 = softmax( x4, x6 ). -0.0177 x4 - 0.0177 x6 + 0.9114 <= x9 <= 0.0177 x4 - 0.0177 x6 + 0.9114 + 0.0177 x4 - 0.0177 x6 + 0.9114 <= x9 <= 0.0177 x4 - 0.0177 x6 + 0.9114 x9.lb = 0 x0 + 0.0354 x1 + 0.0354 x2 + 0.9114 x9.ub = 0 x0 + 0.0354 x1 + 0.0354 x2 + 0.9114 x9 range: [ 0.9820, 0.0180 ] --0.0177 x4 + 0.0177 x6 + 0.0886 <= x11 <= -0.0177 x4 + 0.0177 x6 + 0.0886 + -0.0177 x4 + 0.0177 x6 + 0.0886 <= x11 <= -0.0177 x4 + 0.0177 x6 + 0.0886 x11.lb = 0 x0 - 0.0354 x1 - 0.0354 x2 + 0.0886 x11.ub = 0 x0 - 0.0354 x1 - 0.0354 x2 + 0.0886 x11 range: [ 0.9820, 0.0180 ] @@ -4579,11 +6036,11 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Symbolic bounds of every activation layer in terms of predecessor: Layer 2 (SOFTMAX): -0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 <= x8 <= 0.1154 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 -0.0177 x4 - 0.0177 x6 + 0.9114 <= x9 <= 0.0177 x4 - 0.0177 x6 + 0.9114 --0.1017 x3 + 0.1035 x5 - 0.0019 x7 + 0.3170 <= x10 <= -0.1017 x3 + 0.1036 x5 - 0.0019 x7 + 0.3170 --0.0177 x4 + 0.0177 x6 + 0.0886 <= x11 <= -0.0177 x4 + 0.0177 x6 + 0.0886 --0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 <= x12 <= -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 + 0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 <= x8 <= 0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 + 0.0177 x4 - 0.0177 x6 + 0.9114 <= x9 <= 0.0177 x4 - 0.0177 x6 + 0.9114 + -0.1017 x3 + 0.1035 x5 - 0.0019 x7 + 0.3170 <= x10 <= -0.1017 x3 + 0.1036 x5 - 0.0019 x7 + + 0.3170 -0.0177 x4 + 0.0177 x6 + 0.0886 <= x11 <= -0.0177 x4 + 0.0177 x6 + 0.0886 -0.0138 x3 - + 0.0019 x5 + 0.0156 x7 + 0.0747 <= x12 <= -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 Symbolic bounds of output layer in terms of every layer (backsubstitution): @@ -4602,11 +6059,11 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Layer 1: Using -0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 <= x8 <= 0.1154 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 -0.0177 x4 - 0.0177 x6 + 0.9114 <= x9 <= 0.0177 x4 - 0.0177 x6 + 0.9114 --0.1017 x3 + 0.1035 x5 - 0.0019 x7 + 0.3170 <= x10 <= -0.1017 x3 + 0.1036 x5 - 0.0019 x7 + 0.3170 --0.0177 x4 + 0.0177 x6 + 0.0886 <= x11 <= -0.0177 x4 + 0.0177 x6 + 0.0886 --0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 <= x12 <= -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 + 
0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 <= x8 <= 0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 + 0.0177 x4 - 0.0177 x6 + 0.9114 <= x9 <= 0.0177 x4 - 0.0177 x6 + 0.9114 + -0.1017 x3 + 0.1035 x5 - 0.0019 x7 + 0.3170 <= x10 <= -0.1017 x3 + 0.1036 x5 - 0.0019 x7 + + 0.3170 -0.0177 x4 + 0.0177 x6 + 0.0886 <= x11 <= -0.0177 x4 + 0.0177 x6 + 0.0886 -0.0138 x3 - + 0.0019 x5 + 0.0156 x7 + 0.0747 <= x12 <= -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 1 <= x13 <= 1 -1 <= x14 <= -1 1 <= x15 <= 1 @@ -4621,14 +6078,36 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 0.1155, 0.0000, -0.1017, 0.0000, -0.0138, 0.0000, 0.0177, - 0.0000, -0.0177, 0.0000, -0.1017, 0.0000, 0.1035, 0.0000, - -0.0019, 0.0000, -0.0177, 0.0000, 0.0177, 0.0000, -0.0138, - 0.0000, -0.0019, 0.0000, 0.0156 } ), - Vector( { 0.1155, 0.0000, -0.1017, 0.0000, -0.0138, 0.0000, 0.0177, - 0.0000, -0.0177, 0.0000, -0.1017, 0.0000, 0.1035, 0.0000, - -0.0019, 0.0000, -0.0177, 0.0000, 0.0177, 0.0000, -0.0138, - 0.0000, -0.0019, 0.0000, 0.0156 } ), + Vector( { 0.1155, + 0.0177, + -0.1017, + -0.0177, + -0.0138, + -0.1017, + -0.0177, + 0.1035, + 0.0177, + -0.0019, + -0.0138, + 0.0000, + -0.0019, + 0.0000, + 0.0156 } ), + Vector( { 0.1155, + 0.0177, + -0.1017, + -0.0177, + -0.0138, + -0.1017, + -0.0177, + 0.1036, + 0.0177, + -0.0019, + -0.0138, + 0.0000, + -0.0019, + 0.0000, + 0.0156 } ), Vector( { 0.6084, 0.9114, 0.3170, 0.0886, 0.0747 } ), Vector( { 0.6084, 0.9114, 0.3170, 0.0886, 0.0747 } ) ); @@ -4658,10 +6137,230 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( 12, 0 ), Vector( { 1, -1, 1, -1 } ), Vector( { 1, -1, 1, -1 } ) ); + + // Non-fixed activation neurons: x8 (SOFTMAX), x9 (SOFTMAX), x10 (SOFTMAX), x11 (SOFTMAX), + // x12 (SOFTMAX). + compareNonfixedNeurons( nlr, + Set( { NLR::NeuronIndex( 2, 0 ), + NLR::NeuronIndex( 2, 1 ), + NLR::NeuronIndex( 2, 2 ), + NLR::NeuronIndex( 2, 3 ), + NLR::NeuronIndex( 2, 4 ) } ) ); + } + + void test_gradient_selection_softmax3() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-pmnr-gradient" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax2( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.00001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.00001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.00001 ); + + // Invoke Parameterised DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); + + /* + Calculating Gradient-based PMNR score of x8: + Symbolic bounds of output layer in terms of Layer 2: + x8 + x10 + x12 <= x13 <= x8 + x10 + x12, + -x8 - x10 - x12 <= x14 <= -x8 - x10 - x12, + x9 + x11 <= x15 <= x9 + x11, + -x9 - x11 <= x16 <= -x9 - x11. + x8 gradient: lower = ( 1, -1, 0, 0 ), upper = ( 1, -1, 0, 0 ), avg. = ( 1, -1, 0, 0 ). + Gradient-based PMNR score of x8: ( 1 )^2 + ( -1 )^2 + ( 0 )^2 + ( 0 )^2 = 2. 
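+
+          Double-check with Python (an illustrative sketch of ours for the score
+          formula: average the lower/upper gradients, then take the sum of squares):
+          ---
+          lower = [1, -1, 0, 0]  # d(x13..x16)/dx8 from the lower symbolic bounds
+          upper = [1, -1, 0, 0]  # d(x13..x16)/dx8 from the upper symbolic bounds
+          avg = [(l + u) / 2 for l, u in zip(lower, upper)]
+          print(sum(a * a for a in avg))  # 2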
+        */
+        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 2 );
+
+        /*
+          Calculating Gradient-based PMNR score of x9:
+          Symbolic bounds of output layer in terms of Layer 2:
+          x8 + x10 + x12 <= x13 <= x8 + x10 + x12,
+          -x8 - x10 - x12 <= x14 <= -x8 - x10 - x12,
+          x9 + x11 <= x15 <= x9 + x11,
+          -x9 - x11 <= x16 <= -x9 - x11.
+          x9 gradient: lower = ( 0, 0, 1, -1 ), upper = ( 0, 0, 1, -1 ), avg. = ( 0, 0, 1, -1 ).
+          Gradient-based PMNR score of x9: ( 0 )^2 + ( 0 )^2 + ( 1 )^2 + ( -1 )^2 = 2.
+        */
+        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 2 );
+
+        /*
+          Calculating Gradient-based PMNR score of x10:
+          Symbolic bounds of output layer in terms of Layer 2:
+          x8 + x10 + x12 <= x13 <= x8 + x10 + x12,
+          -x8 - x10 - x12 <= x14 <= -x8 - x10 - x12,
+          x9 + x11 <= x15 <= x9 + x11,
+          -x9 - x11 <= x16 <= -x9 - x11.
+          x10 gradient: lower = ( 1, -1, 0, 0 ), upper = ( 1, -1, 0, 0 ), avg. = ( 1, -1, 0, 0 ).
+          Gradient-based PMNR score of x10: ( 1 )^2 + ( -1 )^2 + ( 0 )^2 + ( 0 )^2 = 2.
+        */
+        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 2 ), 2 );
+
+        /*
+          Calculating Gradient-based PMNR score of x11:
+          Symbolic bounds of output layer in terms of Layer 2:
+          x8 + x10 + x12 <= x13 <= x8 + x10 + x12,
+          -x8 - x10 - x12 <= x14 <= -x8 - x10 - x12,
+          x9 + x11 <= x15 <= x9 + x11,
+          -x9 - x11 <= x16 <= -x9 - x11.
+          x11 gradient: lower = ( 0, 0, 1, -1 ), upper = ( 0, 0, 1, -1 ), avg. = ( 0, 0, 1, -1 ).
+          Gradient-based PMNR score of x11: ( 0 )^2 + ( 0 )^2 + ( 1 )^2 + ( -1 )^2 = 2.
+        */
+        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 3 ), 2 );
+
+        /*
+          Calculating Gradient-based PMNR score of x12:
+          Symbolic bounds of output layer in terms of Layer 2:
+          x8 + x10 + x12 <= x13 <= x8 + x10 + x12,
+          -x8 - x10 - x12 <= x14 <= -x8 - x10 - x12,
+          x9 + x11 <= x15 <= x9 + x11,
+          -x9 - x11 <= x16 <= -x9 - x11.
+          x12 gradient: lower = ( 1, -1, 0, 0 ), upper = ( 1, -1, 0, 0 ), avg. = ( 1, -1, 0, 0 ).
+          Gradient-based PMNR score of x12: ( 1 )^2 + ( -1 )^2 + ( 0 )^2 + ( 0 )^2 = 2.
+        */
+        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 4 ), 2 );
+    }
+
+    void test_bbps_selection_softmax3()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+                                   "backward-pmnr-bbps" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTSoftmax2( nlr, tableau );
+
+        tableau.setLowerBound( 0, 1 );
+        tableau.setUpperBound( 0, 1.00001 );
+        tableau.setLowerBound( 1, 1 );
+        tableau.setUpperBound( 1, 1.00001 );
+        tableau.setLowerBound( 2, 1 );
+        tableau.setUpperBound( 2, 1.00001 );
+
+        // Invoke Parameterised DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) );
+
+        // Using branching point (x3, 2) for x8 (SOFTMAX).
+        compareBBPSBranchingPoints(
+            nlr,
+            NLR::NeuronIndex( 2, 0 ),
+            std::pair( { NLR::NeuronIndex( 1, 0 ), 2 } ) );
+
+        // Using branching point (x4, 3) for x9 (SOFTMAX).
+        compareBBPSBranchingPoints(
+            nlr,
+            NLR::NeuronIndex( 2, 1 ),
+            std::pair( { NLR::NeuronIndex( 1, 1 ), 3 } ) );
+
+        // Using branching point (x5, 0) for x10 (SOFTMAX).
+        compareBBPSBranchingPoints(
+            nlr,
+            NLR::NeuronIndex( 2, 2 ),
+            std::pair( { NLR::NeuronIndex( 1, 2 ), 0 } ) );
+
+        // Using branching point (x6, -1) for x11 (SOFTMAX).
+        compareBBPSBranchingPoints(
+            nlr,
+            NLR::NeuronIndex( 2, 3 ),
+            std::pair( { NLR::NeuronIndex( 1, 3 ), -1 } ) );
+
+        // Using branching point (x7, -2) for x12 (SOFTMAX).
+        compareBBPSBranchingPoints(
+            nlr,
+            NLR::NeuronIndex( 2, 4 ),
+            std::pair( { NLR::NeuronIndex( 1, 4 ), -2 } ) );
+
+        /*
+          Symbolic bounds of x8 in terms of predecessor (for both branches, since range(x3) <
+          0.0001): 0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 <= x8 <= 0.1155 x3 - 0.1017 x5 - 0.0138 x7 +
+          0.6084. Concretizing x5, x7: 0.1155 x3 + 0.6360 <= x8 <= 0.1155 x3 + 0.6360.
+
+          Symbolic bounds of x9 in terms of predecessor (for both branches, since range(x4) <
+          0.0001): 0.0177 x4 - 0.0177 x6 + 0.9114 <= x9 <= 0.0177 x4 - 0.0177 x6 + 0.9114. Concretizing
+          x6: 0.0177 x4 + 0.9291 <= x9 <= 0.0177 x4 + 0.9291.
+
+          Symbolic bounds of x10 in terms of predecessor (for both branches, since range(x5) <
+          0.0001): -0.1017 x3 + 0.1035 x5 - 0.0019 x7 + 0.3170 <= x10 <= -0.1017 x3 + 0.1036 x5 - 0.0019
+          x7 + 0.3170. Concretizing x3, x7: 0.1035 x5 + 0.1174 <= x10 <= 0.1036 x5 + 0.1174.
+
+          Symbolic bounds of x11 in terms of predecessor (for both branches, since range(x6) <
+          0.0001): -0.0177 x4 + 0.0177 x6 + 0.0886 <= x11 <= -0.0177 x4 + 0.0177 x6 + 0.0886. Concretizing
+          x4: 0.0177 x6 + 0.0356 <= x11 <= 0.0177 x6 + 0.0356.
+
+          Symbolic bounds of x12 in terms of predecessor (for both branches, since range(x7) <
+          0.0001): -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 <= x12 <= -0.0138 x3 - 0.0019 x5 + 0.0156
+          x7 + 0.0747. Concretizing x3, x5: 0.0156 x7 + 0.0471 <= x12 <= 0.0156 x7 + 0.0471.
+        */
+        compareBranchSymbolicBounds( nlr,
+                                     NLR::NeuronIndex( 2, 0 ),
+                                     Vector( { 0.1155, 0.1155 } ),
+                                     Vector( { 0.1155, 0.1155 } ),
+                                     Vector( { 0.6360, 0.6360 } ),
+                                     Vector( { 0.6360, 0.6360 } ) );
+        compareBranchSymbolicBounds( nlr,
+                                     NLR::NeuronIndex( 2, 1 ),
+                                     Vector( { 0.0177, 0.0177 } ),
+                                     Vector( { 0.0177, 0.0177 } ),
+                                     Vector( { 0.9291, 0.9291 } ),
+                                     Vector( { 0.9291, 0.9291 } ) );
+        compareBranchSymbolicBounds( nlr,
+                                     NLR::NeuronIndex( 2, 2 ),
+                                     Vector( { 0.1035, 0.1035 } ),
+                                     Vector( { 0.1036, 0.1036 } ),
+                                     Vector( { 0.1174, 0.1174 } ),
+                                     Vector( { 0.1174, 0.1174 } ) );
+        compareBranchSymbolicBounds( nlr,
+                                     NLR::NeuronIndex( 2, 3 ),
+                                     Vector( { 0.0177, 0.0177 } ),
+                                     Vector( { 0.0177, 0.0177 } ),
+                                     Vector( { 0.0356, 0.0356 } ),
+                                     Vector( { 0.0356, 0.0356 } ) );
+        compareBranchSymbolicBounds( nlr,
+                                     NLR::NeuronIndex( 2, 4 ),
+                                     Vector( { 0.0156, 0.0156 } ),
+                                     Vector( { 0.0156, 0.0156 } ),
+                                     Vector( { 0.0471, 0.0471 } ),
+                                     Vector( { 0.0471, 0.0471 } ) );
+
+        /*
+          Calculating BBPS-based PMNR score of x8, x9, x10, x11, x12:
+          Symbolic bounds of output layer in terms of Layer 2:
+          x8 + x10 + x12 <= x13 <= x8 + x10 + x12
+          -x8 - x10 - x12 <= x14 <= -x8 - x10 - x12
+          x9 + x11 <= x15 <= x9 + x11
+          -x9 - x11 <= x16 <= -x9 - x11
+
+          Because the lower/upper symbolic bounds for output layer are equal (up to ~10^-6),
+          and lower/upper predecessor symbolic bounds for both branches are equal, the concrete
+          bounds for every output neuron, every nonfixed neuron and branch are equal to DeepPoly.
+          Consequently, the BBPS-based PMNR scores for all neurons are 0.
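+
+          Double-check with Python (an illustrative sketch of ours: when a branch's
+          concrete box coincides with DeepPoly's box, its improvement is exactly 0):
+          ---
+          def improvement(concrete, deeppoly):
+              return max(0.0, concrete[0] - deeppoly[0]) + max(0.0, deeppoly[1] - concrete[1])
+
+          box = (0.9291, 0.9292)        # any box; both branches reproduce it exactly
+          print(improvement(box, box))  # 0.0, hence every score below is 0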
+        */
+        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 0 );
+        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 0 );
+        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 2 ), 0 );
+        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 3 ), 0 );
+        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 4 ), 0 );
     }
 
     void test_symbolic_bound_maps_bilinear()
     {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+
         NLR::NetworkLevelReasoner nlr;
         MockTableau tableau;
         nlr.setTableau( &tableau );
@@ -4672,9 +6371,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         tableau.setLowerBound( 1, -2 );
         tableau.setUpperBound( 1, 1 );
 
-        // Invoke initializeSymbolicBoundsMaps
+        // Invoke Parameterised DeepPoly
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps() );
+        TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) );
 
         /*
           Input ranges:
@@ -4734,16 +6433,16 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
           Symbolic bounds of every activation layer in terms of predecessor:
 
           Layer 2 (BILINEAR):
-          -x2 - x3 - 1 <= x5 <= 3x2 - x3 + 3
+          -x2 - x3 - 1 <= x4 <= 3x2 - x3 + 3
 
           Symbolic bounds of output layer in terms of every layer (backsubstitution):
 
           Layer 3:
-          x4 <= x5 <= x4
+          x5 <= x5 <= x5
 
           Layer 2:
           Using x5 = -x4:
-          -x3 <= x4 <= -x4
+          -x4 <= x5 <= -x4
 
           Layer 1:
           Using -x2 - x3 - 1 <= x4 <= 3x2 - x3 + 3:
@@ -4784,6 +6483,117 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
                                      Vector( { 2, -1 } ),
                                      Vector( { -3 } ),
                                      Vector( { 1 } ) );
+
+        // Non-fixed activation neurons: x4 (BILINEAR).
+        compareNonfixedNeurons( nlr, Set( { NLR::NeuronIndex( 2, 0 ) } ) );
+    }
+
+    void test_gradient_selection_bilinear()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+                                   "backward-pmnr-gradient" );
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTBilinear( nlr, tableau );
+
+        tableau.setLowerBound( 0, 1 );
+        tableau.setUpperBound( 0, 2 );
+        tableau.setLowerBound( 1, -2 );
+        tableau.setUpperBound( 1, 1 );
+
+        // Invoke Parameterised DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) );
+        /*
+          Calculating Gradient-based PMNR score of x4:
+          Symbolic bounds of output layer in terms of Layer 2: -x4 <= x5 <= -x4.
+          x4 gradient: lower = ( -1 ), upper = ( -1 )
+          Gradient-based PMNR score of x4: ( ( -1 )^2 + ( -1 )^2 ) / 2 = 1.
+        */
+        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 1 );
+    }
+
+    void test_bbps_selection_bilinear()
+    {
+        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
+                                   "backward-pmnr-bbps" );
+
+        NLR::NetworkLevelReasoner nlr;
+        MockTableau tableau;
+        nlr.setTableau( &tableau );
+        populateNetworkSBTBilinear( nlr, tableau );
+
+        tableau.setLowerBound( 0, 1 );
+        tableau.setUpperBound( 0, 2 );
+        tableau.setLowerBound( 1, -2 );
+        tableau.setUpperBound( 1, 1 );
+
+        // Invoke Parameterised DeepPoly
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) );
+
+        // Using branching point (x3, 1/3) for x4 (BILINEAR).
+        compareBBPSBranchingPoints(
+            nlr,
+            NLR::NeuronIndex( 2, 0 ),
+            std::pair( { NLR::NeuronIndex( 1, 1 ), 0.3333 } ) );
+
+        /*
+          Coefficients for bilinear layer (lower branch, x2: [-1, 6], x3: [-1, 1/3]):
+          Lower bound:
+          alpha_l = x3.lb = -1
+          beta = x2.lb = -1
+          gamma_l = -x2.lb x3.lb = --1 * -1 = -1
+
+          Upper bound:
+          alpha_u = x3.ub = 1/3
+          beta = x2.lb = -1
+          gamma_u = -x2.lb x3.ub = --1 * 1/3 = 1/3
+
+          -x2 - x3 - 1 <= x4 <= 1/3 x2 - x3 + 1/3.
+          Concretizing x2: -x3 - 7 <= x4 <= -x3 + 7/3.
+
+          Coefficients for bilinear layer (upper branch, x2: [-1, 6], x3: [1/3, 3]):
+          Lower bound:
+          alpha_l = x3.lb = 1/3
+          beta = x2.lb = -1
+          gamma_l = -x2.lb x3.lb = --1 * 1/3 = 1/3
+
+          Upper bound:
+          alpha_u = x3.ub = 3
+          beta = x2.lb = -1
+          gamma_u = -x2.lb x3.ub = --1 * 3 = 3
+
+          1/3 x2 - x3 + 1/3 <= x4 <= 3x2 - x3 + 3.
+          Concretizing x2: -x3 <= x4 <= -x3 + 21.
+
+          Lower branch symbolic bounds: -x3 - 7 <= x4 <= -x3 + 7/3.
+          Upper branch symbolic bounds: -x3 <= x4 <= -x3 + 21.
+        */
+        compareBranchSymbolicBounds( nlr,
+                                     NLR::NeuronIndex( 2, 0 ),
+                                     Vector( { -1, -1 } ),
+                                     Vector( { -1, -1 } ),
+                                     Vector( { -7, 0 } ),
+                                     Vector( { 2.3333, 21 } ) );
+
+        /* Calculating BBPS-based PMNR score of x4:
+           Symbolic bounds of output layer in terms of Layer 2: -x4 <= x5 <= -x4.
+
+           Lower branch, using x3: [-1, 1/3], -x3 - 7 <= x4 <= -x3 + 7/3:
+           Output symbolic bounds x3 - 7/3 <= x5 <= x3 + 7.
+           Concrete bounds: [-10/3, 22/3]. DeepPoly bounds: [-18, 6]. Improvement: 44/3.
+
+           Upper branch, using x3: [1/3, 3], -x3 <= x4 <= -x3 + 21:
+           Output symbolic bounds x3 - 21 <= x5 <= x3.
+           Concrete bounds: [-62/3, 3]. DeepPoly bounds: [-18, 6]. Improvement: 3.
+
+           Final score = ( 44/3 + 3 ) / 2 = 53/6 = 8.8333.
+        */
+        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 8.8333 );
     }
 
     void test_parameterised_symbolic_bound_maps_relus_all_active()
@@ -4803,9 +6613,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         unsigned paramCount = nlr.getNumberOfParameters();
         Vector coeffs( paramCount, 0.5 );
 
-        // Invoke parameterised initializeSymbolicBoundsMaps
+        // Invoke Parameterised DeepPoly
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) );
 
         /*
           Input ranges:
@@ -4886,8 +6696,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         */
         comparePredecessorSymbolicBounds( nlr,
                                           2,
-                                          Vector( { 1, 0, 0, 1 } ),
-                                          Vector( { 1, 0, 0, 1 } ),
+                                          Vector( { 1, 1 } ),
+                                          Vector( { 1, 1 } ),
                                           Vector( { 0, 0 } ),
                                           Vector( { 0, 0 } ) );
 
@@ -4915,6 +6725,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
                                      Vector( { 1, 2 } ),
                                      Vector( { 0 } ),
                                      Vector( { 0 } ) );
+
+        // Non-fixed activation neurons: None.
+ compareNonfixedNeurons( nlr, Set( {} ) ); } void test_parameterised_symbolic_bound_maps_relus_active_and_inactive() @@ -4937,9 +6750,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite unsigned paramCount = nlr.getNumberOfParameters(); Vector coeffs( paramCount, 0.5 ); - // Invoke parameterised initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); /* Input ranges: @@ -5021,8 +6834,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite */ comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 0, 0, 0, 1 } ), - Vector( { 0, 0, 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), Vector( { 0, 0 } ), Vector( { 0, 0 } ) ); @@ -5050,6 +6863,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { -1, -1 } ), Vector( { 0 } ), Vector( { 0 } ) ); + + // Non-fixed activation neurons: None. + compareNonfixedNeurons( nlr, Set( {} ) ); } void test_parameterised_symbolic_bound_maps_relus_active_and_not_fixed() @@ -5072,9 +6888,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite unsigned paramCount = nlr.getNumberOfParameters(); Vector coeffs( paramCount, 0.5 ); - // Invoke parameterised initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); /* Input ranges: @@ -5160,8 +6976,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite */ comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 0.5, 0, 0, 1 } ), - Vector( { 0.75, 0, 0, 1 } ), + Vector( { 0.5, 1 } ), + Vector( { 0.75, 1 } ), Vector( { 0, 0 } ), Vector( { 3, 0 } ) ); @@ -5189,6 +7005,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { 0.5, 1.25 } ), Vector( { -7.5 } ), Vector( { -8.25 } ) ); + + // Non-fixed activation neurons: x4 (RELU). + compareNonfixedNeurons( nlr, Set( { NLR::NeuronIndex( 2, 0 ) } ) ); } void test_parameterised_symbolic_bound_maps_relus_active_and_externally_fixed() @@ -5214,9 +7033,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite unsigned paramCount = nlr.getNumberOfParameters(); Vector coeffs( paramCount, 0.5 ); - // Invoke parameterised initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); /* Input ranges: @@ -5297,8 +7116,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite */ comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 0, 0, 0, 1 } ), - Vector( { 0, 0, 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), Vector( { 0, 0 } ), Vector( { 0, 0 } ) ); @@ -5326,6 +7145,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { -1, -1 } ), Vector( { 0 } ), Vector( { 0 } ) ); + + // Non-fixed activation neurons: None. 
+ compareNonfixedNeurons( nlr, Set( {} ) ); } void test_parameterised_symbolic_bound_maps_relu_residual1() @@ -5343,9 +7165,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite unsigned paramCount = nlr.getNumberOfParameters(); Vector coeffs( paramCount, 0.5 ); - // Invoke parameterised initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); /* Input ranges: @@ -5494,6 +7316,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { -0.2143 } ), Vector( { 1.75 } ), Vector( { 5.2857 } ) ); + + // Non-fixed activation neurons: x2 (RELU), x4 (RELU). + compareNonfixedNeurons( + nlr, Set( { NLR::NeuronIndex( 2, 0 ), NLR::NeuronIndex( 4, 0 ) } ) ); } void test_parameterised_symbolic_bound_maps_relu_residual2() @@ -5511,9 +7337,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite unsigned paramCount = nlr.getNumberOfParameters(); Vector coeffs( paramCount, 0.5 ); - // Invoke parameterised initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); /* Input ranges: @@ -5679,6 +7505,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { -2.2143 } ), Vector( { 1.75 } ), Vector( { 5.2857 } ) ); + + // Non-fixed activation neurons: x2 (RELU), x4 (RELU). + compareNonfixedNeurons( + nlr, Set( { NLR::NeuronIndex( 2, 0 ), NLR::NeuronIndex( 4, 0 ) } ) ); } void test_parameterised_symbolic_bound_maps_relu_reindex() @@ -5698,9 +7528,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite unsigned paramCount = nlr.getNumberOfParameters(); Vector coeffs( paramCount, 0.5 ); - // Invoke parameterised initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); /* Input ranges: @@ -5841,15 +7671,15 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite */ comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 0, 0.5, 0.5, 0 } ), - Vector( { 0, 0.5, 0.5, 0 } ), + Vector( { 0.5, 0.5 } ), + Vector( { 0.5, 0.5 } ), Vector( { 0, 0 } ), Vector( { 1, 1 } ) ); comparePredecessorSymbolicBounds( nlr, 4, - Vector( { 0, 0.5, 0.5, 0 } ), - Vector( { 0, 0.75, 0.5, 0 } ), + Vector( { 0.5, 0.5 } ), + Vector( { 0.5, 0.75 } ), Vector( { 0, 0 } ), Vector( { 1, 0.75 } ) ); @@ -5889,64 +7719,23 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { 0.75, 0, 0.5, 0.5 } ), Vector( { 1, -0.5 } ), Vector( { 4.25, 1.5 } ) ); + + // Non-fixed activation neurons: x4 (RELU), x5 (RELU), x8 (RELU), x9 (RELU). 
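
A note on the "non-fixed" bookkeeping these assertions exercise: an activation neuron is reported as non-fixed when the current bounds of its source leave the phase undetermined. For ReLU-style functions that reduces to the following predicate (a sketch stated as an assumption, not quoted from Layer::getNonfixedNeurons()):

    bool isNonfixed( double sourceLb, double sourceUb )
    {
        // Phase undetermined iff the source bounds straddle zero.
        return FloatUtils::isNegative( sourceLb ) && FloatUtils::isPositive( sourceUb );
    }
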
+ compareNonfixedNeurons( nlr, + Set( { NLR::NeuronIndex( 2, 0 ), + NLR::NeuronIndex( 2, 1 ), + NLR::NeuronIndex( 4, 0 ), + NLR::NeuronIndex( 4, 1 ) } ) ); } - void test_parameterised_symbolic_bound_maps_abs_all_positive() + void test_parameterised_symbolic_bound_maps_absolute_values_all_positive() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); + populateNetworkSBTAbsoluteValue( nlr, tableau ); tableau.setLowerBound( 0, 4 ); tableau.setUpperBound( 0, 6 ); @@ -5956,9 +7745,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite unsigned paramCount = nlr.getNumberOfParameters(); Vector coeffs( paramCount, 0.5 ); - // Invoke parameterised initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); /* Input ranges: @@ -6038,8 +7827,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite */ comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 1, 0, 0, 1 } ), - Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 1 } ), + Vector( { 1, 1 } ), Vector( { 0, 0 } ), Vector( { 0, 0 } ) ); @@ -6066,65 +7855,20 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { 1, 2 } ), Vector( { 1, 2 } ), Vector( { 0 } ), - Vector( { 0 } ) ); - } - - void test_parameterised_symbolic_bound_maps_abs_positive_and_negative() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, 
NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); + Vector( { 0 } ) ); + + // Non-fixed activation neurons: None. + compareNonfixedNeurons( nlr, Set( {} ) ); + } + + void test_parameterised_symbolic_bound_maps_absolute_values_positive_and_negative() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTAbsoluteValue( nlr, tableau ); tableau.setLowerBound( 0, 4 ); tableau.setUpperBound( 0, 6 ); @@ -6137,9 +7881,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite unsigned paramCount = nlr.getNumberOfParameters(); Vector coeffs( paramCount, 0.5 ); - // Invoke parameterised initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); /* Input ranges: @@ -6219,8 +7963,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite */ comparePredecessorSymbolicBounds( nlr, 2, - Vector( { -1, 0, 0, 1 } ), - Vector( { -1, 0, 0, 1 } ), + Vector( { -1, 1 } ), + Vector( { -1, 1 } ), Vector( { 0, 0 } ), Vector( { 0, 0 } ) ); @@ -6248,6 +7992,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { -3, -4 } ), Vector( { 30 } ), Vector( { 30 } ) ); + + // Non-fixed activation neurons: None. 
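
The { -1, 1 } slopes asserted above are the two sign-fixed phases of the absolute value: once the source's sign is known, |x| is exactly linear, so the lower and upper symbolic bounds coincide. As a sketch (illustrative helper):

    void fixedAbsBounds( double sourceLb, double sourceUb, double &slope, double &bias )
    {
        ASSERT( !FloatUtils::isNegative( sourceLb ) || !FloatUtils::isPositive( sourceUb ) );
        // Negative phase ( ub <= 0 ): y = -x; positive phase ( lb >= 0 ): y = x.
        slope = FloatUtils::isPositive( sourceUb ) ? 1 : -1;
        bias = 0;
    }
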
+ compareNonfixedNeurons( nlr, Set( {} ) ); } void test_parameterised_symbolic_bound_maps_absolute_values_positive_and_not_fixed() @@ -6256,56 +8003,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); + populateNetworkSBTAbsoluteValue( nlr, tableau ); tableau.setLowerBound( 0, 4 ); tableau.setUpperBound( 0, 6 ); @@ -6318,9 +8017,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite unsigned paramCount = nlr.getNumberOfParameters(); Vector coeffs( paramCount, 0.5 ); - // Invoke parameterised initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); /* Input ranges: @@ -6404,8 +8103,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite */ comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 0, 0, 0, 1 } ), - Vector( { 0, 0, 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), Vector( { 0, 0 } ), Vector( { 12, 0 } ) ); @@ -6433,6 +8132,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { -1, -1 } ), Vector( { 0 } ), Vector( { 12 } ) ); + + // Non-fixed activation neurons: x4 (ABSOLUTE_VALUE). 
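
For the non-fixed absolute-value neuron above, the asserted flat bounds are consistent with a lower bound of ( 2 * gamma - 1 ) * x, which vanishes at gamma = 0.5, and the chord as the upper bound, which is the constant 12 on a symmetric source range [-12, 12]. A sketch under those assumptions (the exact parameterisation is inferred from the asserted numbers, not quoted from the implementation):

    // Parameterised absolute value for lb < 0 < ub (assumed form):
    void parameterisedAbsBounds( double lb, double ub, double gamma,
                                 double &lowSlope, double &upSlope, double &upBias )
    {
        lowSlope = 2 * gamma - 1;            // 0 at gamma = 0.5
        upSlope = ( ub + lb ) / ( ub - lb ); // chord slope; 0 on [-12, 12]
        upBias = -2 * lb * ub / ( ub - lb ); // chord bias; 12 on [-12, 12]
    }
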
+ compareNonfixedNeurons( nlr, Set( { NLR::NeuronIndex( 2, 0 ) } ) ); } void test_parameterised_symbolic_bound_maps_absolute_values_active_and_externally_fixed() @@ -6441,56 +8143,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); + populateNetworkSBTAbsoluteValue( nlr, tableau ); tableau.setLowerBound( 0, 4 ); tableau.setUpperBound( 0, 6 ); @@ -6506,9 +8160,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite unsigned paramCount = nlr.getNumberOfParameters(); Vector coeffs( paramCount, 0.5 ); - // Invoke parameterised initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); /* Input ranges: @@ -6591,8 +8245,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite */ comparePredecessorSymbolicBounds( nlr, 2, - Vector( { -1, 0, 0, 1 } ), - Vector( { -1, 0, 0, 1 } ), + Vector( { -1, 1 } ), + Vector( { -1, 1 } ), Vector( { 0, 0 } ), Vector( { 0, 0 } ) ); @@ -6620,6 +8274,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { -1, -1 } ), Vector( { 3 } ), Vector( { 3 } ) ); + + // Non-fixed activation neurons: None. 
+ compareNonfixedNeurons( nlr, Set( {} ) ); } void test_parameterised_symbolic_bound_maps_signs_positive_and_not_fixed() @@ -6628,56 +8285,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); + populateNetworkSBTSign( nlr, tableau ); tableau.setLowerBound( 0, 4 ); tableau.setUpperBound( 0, 6 ); @@ -6690,9 +8299,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite unsigned paramCount = nlr.getNumberOfParameters(); Vector coeffs( paramCount, 0.5 ); - // Invoke parameterised initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); /* Input ranges: @@ -6782,8 +8391,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite */ comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 0.0833, 0, 0, 0 } ), - Vector( { 0.25, 0, 0, 0 } ), + Vector( { 0.0833, 0 } ), + Vector( { 0.25, 0 } ), Vector( { -1, 1 } ), Vector( { 1, 1 } ) ); @@ -6811,6 +8420,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { 0.5, 0.75 } ), Vector( { -3.25 } ), Vector( { -3.75 } ) ); + + // Non-fixed activation neurons: x4 (SIGN). 
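
The 0.0833 and 0.25 slopes asserted for the non-fixed sign neuron are consistent with relaxation lines pivoting around ( 0, -1 ) and ( 0, 1 ): with gamma = 0.5 on a source range of [-4, 12], the lower slope is 2 * gamma / ub = 1/12 and the upper slope is 2 * gamma / ( -lb ) = 1/4. A sketch under that assumption:

    // Parameterised sign bounds for lb < 0 < ub (assumed form):
    //   y >= ( 2 * gamma / ub ) * x - 1,  y <= ( 2 * gamma / -lb ) * x + 1
    void parameterisedSignBounds( double lb, double ub, double gamma,
                                  double &lowSlope, double &upSlope )
    {
        lowSlope = 2 * gamma / ub;  // 0.0833 for ub = 12
        upSlope = 2 * gamma / -lb;  // 0.25 for lb = -4
    }
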
+ compareNonfixedNeurons( nlr, Set( { NLR::NeuronIndex( 2, 0 ) } ) ); } void test_parameterised_symbolic_bound_maps_signs_active_and_externally_fixed() @@ -6819,56 +8431,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); + populateNetworkSBTSign( nlr, tableau ); tableau.setLowerBound( 0, 4 ); tableau.setUpperBound( 0, 6 ); @@ -6884,9 +8448,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite unsigned paramCount = nlr.getNumberOfParameters(); Vector coeffs( paramCount, 0.5 ); - // Invoke parameterised initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); /* Input ranges: @@ -6963,8 +8527,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite */ comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 0, 0, 0, 0 } ), - Vector( { 0, 0, 0, 0 } ), + Vector( { 0, 0 } ), + Vector( { 0, 0 } ), Vector( { -1, 1 } ), Vector( { -1, 1 } ) ); @@ -6992,10 +8556,15 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { 0, 0 } ), Vector( { -2 } ), Vector( { -2 } ) ); + + // Non-fixed activation neurons: None. 
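
The leaky-ReLU test that follows asserts 0.6 slopes and an upper bias of 0.8, consistent with a leak of 0.2 on a source range of [-2, 2]: the upper bound is the chord through ( lb, 0.2 * lb ) and ( ub, ub ), while the lower slope interpolates between the leak and 1 using the 0.5 coefficient. A sketch under those assumptions (hypothetical helper):

    // Parameterised LeakyReLU ( y = x for x >= 0, else y = a * x ), lb < 0 < ub:
    void parameterisedLeakyReluBounds( double lb, double ub, double a, double gamma,
                                       double &lowSlope, double &upSlope, double &upBias )
    {
        lowSlope = gamma + ( 1 - gamma ) * a;    // 0.6 for a = 0.2, gamma = 0.5
        upSlope = ( ub - a * lb ) / ( ub - lb ); // chord slope; 0.6 on [-2, 2]
        upBias = ub * ( 1 - upSlope );           // 0.8 on [-2, 2]
    }
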
+ compareNonfixedNeurons( nlr, Set( {} ) ); } void test_parameterised_symbolic_bound_maps_leaky_relu() { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -7009,9 +8578,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite unsigned paramCount = nlr.getNumberOfParameters(); Vector coeffs( paramCount, 0.5 ); - // Invoke parameterised initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); /* Input ranges: @@ -7173,15 +8742,15 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite */ comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 0.6, 0, 0, 0.6 } ), - Vector( { 0.6, 0, 0, 0.6 } ), + Vector( { 0.6, 0.6 } ), + Vector( { 0.6, 0.6 } ), Vector( { 0, 0 } ), Vector( { 0.8, 0.8 } ) ); comparePredecessorSymbolicBounds( nlr, 4, - Vector( { 0.6, 0, 0, 0.6 } ), - Vector( { 0.76, 0, 0, 0.6 } ), + Vector( { 0.6, 0.6 } ), + Vector( { 0.76, 0.6 } ), Vector( { 0, 0 } ), Vector( { 0.672, 0.8 } ) ); @@ -7221,6 +8790,14 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { 0.912, 0, 0.72, 0.72 } ), Vector( { 1, -0.48 } ), Vector( { 3.688, 1.28 } ) ); + + // Non-fixed activation neurons: x4 (LEAKY_RELU), x5 (LEAKY_RELU), x8 (LEAKY_RELU), x9 + // (LEAKY_RELU). + compareNonfixedNeurons( nlr, + Set( { NLR::NeuronIndex( 2, 0 ), + NLR::NeuronIndex( 2, 1 ), + NLR::NeuronIndex( 4, 0 ), + NLR::NeuronIndex( 4, 1 ) } ) ); } void test_parameterised_symbolic_bound_maps_sigmoids_and_round() @@ -7240,9 +8817,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite unsigned paramCount = nlr.getNumberOfParameters(); Vector coeffs( paramCount, 0.5 ); - // Invoke parameterised initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); @@ -7298,7 +8875,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x4 >= lambda7_prime * x2 + ( g(l3) - lambda7_prime * l3 ) x4 <= lambda7_prime * x2 + ( g(u3) - lambda7_prime * u3 ) x5 >= lambda8_prime * x3 + ( g(l4) - lambda8_prime * l4 ) - x5 <= lambda8_prime * x3 + ( g(u4) - lambda7_prime * u4 ) + x5 <= lambda8_prime * x3 + ( g(u4) - lambda8_prime * u4 ) ''' print('------------------') print(lambda7_prime) @@ -7308,7 +8885,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite print(g(l4) - lambda8_prime * l4) print(g(u4) - lambda8_prime * u4) - --- [output]: 0.4483930148512481 @@ -7375,14 +8951,14 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite */ comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 0.1050, 0, 0, 0.1050 } ), - Vector( { 0.1050, 0, 0, 0.1050 } ), + Vector( { 0.1050, 0.1050 } ), + Vector( { 0.1050, 0.1050 } ), Vector( { 0.3292, 0.3292 } ), Vector( { 0.6708, 0.6708 } ) ); comparePredecessorSymbolicBounds( nlr, 4, - Vector( { 1, 0, 0, 1 } ), - Vector( { 1, 0, 0, 1 } ), + Vector( { 1, 1 } ), + Vector( { 1, 1 } ), Vector( { -0.5, -0.5 } ), Vector( { 0.5, 0.5 } ) ); @@ -7416,6 +8992,13 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { 
0.2100, 0, 0, 0.2100 } ), Vector( { 0.1584, -0.8416 } ), Vector( { 1.8416, 0.8416 } ) ); + + // Non-fixed activation neurons: x4 (SIGMOID), x5 (SIGMOID), x8 (ROUND), x9 (ROUND). + compareNonfixedNeurons( nlr, + Set( { NLR::NeuronIndex( 2, 0 ), + NLR::NeuronIndex( 2, 1 ), + NLR::NeuronIndex( 4, 0 ), + NLR::NeuronIndex( 4, 1 ) } ) ); } void test_parameterised_symbolic_bound_maps_max_not_fixed() @@ -7435,9 +9018,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite unsigned paramCount = nlr.getNumberOfParameters(); Vector coeffs( paramCount, 0.5 ); - // Invoke parameterised initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); /* Input ranges: @@ -7540,8 +9123,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite */ comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 0.5, 0, 0, 0.5 } ), - Vector( { 0.6, 0, 0, 0.4 } ), + Vector( { 0.5, 0.5 } ), + Vector( { 0.6, 0.4 } ), Vector( { 0, 0 } ), Vector( { 1.2, 1.2 } ) ); comparePredecessorSymbolicBounds( nlr, @@ -7581,6 +9164,12 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { 0, 0 } ), Vector( { 0 } ), Vector( { 6 } ) ); + + // Non-fixed activation neurons: x4 (RELU), x5 (RELU), x6 (MAX). + compareNonfixedNeurons( nlr, + Set( { NLR::NeuronIndex( 2, 0 ), + NLR::NeuronIndex( 2, 1 ), + NLR::NeuronIndex( 3, 0 ) } ) ); } void test_parameterised_symbolic_bound_maps_max_fixed() @@ -7600,9 +9189,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite unsigned paramCount = nlr.getNumberOfParameters(); Vector coeffs( paramCount, 0.5 ); - // Invoke parameterised initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); /* Input ranges: @@ -7698,8 +9287,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite */ comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 0, 0, 0, 1 } ), - Vector( { 0, 0, 0, 1 } ), + Vector( { 0, 1 } ), + Vector( { 0, 1 } ), Vector( { 0, 0 } ), Vector( { 0, 0 } ) ); comparePredecessorSymbolicBounds( nlr, @@ -7739,6 +9328,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { 2, -2 } ), Vector( { 0 } ), Vector( { 0 } ) ); + + // Non-fixed activation neurons: None. 
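
Returning to the sigmoid case above: the embedded Python snippet computes parallel bounding lines whose common slope is the smaller of the endpoint derivatives. The same computation in C++, mirroring the snippet's formulas (a sketch, not Marabou code; requires <cmath> and <algorithm>):

    double sigmoid( double x )
    {
        return 1.0 / ( 1.0 + std::exp( -x ) );
    }

    // Parallel bounds: y >= lambda * x + lowerBias and y <= lambda * x + upperBias.
    void sigmoidParallelBounds( double l, double u,
                                double &lambda, double &lowerBias, double &upperBias )
    {
        double gl = sigmoid( l );
        double gu = sigmoid( u );
        lambda = std::min( gl * ( 1 - gl ), gu * ( 1 - gu ) ); // lambda_prime
        lowerBias = gl - lambda * l;                           // g(l) - lambda_prime * l
        upperBias = gu - lambda * u;                           // g(u) - lambda_prime * u
    }
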
+ compareNonfixedNeurons( nlr, Set( {} ) ); } void test_parameterised_symbolic_bound_maps_softmax1() @@ -7760,9 +9352,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite unsigned paramCount = nlr.getNumberOfParameters(); Vector coeffs( paramCount, 0.5 ); - // Invoke parameterised initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); } void test_parameterised_symbolic_bound_maps_softmax2() @@ -7786,9 +9378,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite unsigned paramCount = nlr.getNumberOfParameters(); Vector coeffs( paramCount, 0.5 ); - // Invoke parameterised initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); /* Input ranges: @@ -7876,17 +9468,17 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite /* Layer 2: -0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 + 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 x6.lb = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232 x6.ub = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232 x6 range: [ 0.2595, 0.2595 ] --0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4480 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 + -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4480 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 x7.lb = -0.3660 x0 - 0.4156 x1 + 0.0496 x2 + 0.6062 x7.ub = -0.3660 x0 - 0.4156 x1 + 0.0496 x2 + 0.6063 x7 range: [ 0.7054, 0.7054 ] --0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= 0.1922 x3 -0.0248 x4 + 0.0339 x5 + 0.1277 + -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 x8.lb = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707 x8.ub = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707 x8 range: [ 0.0351, 0.0351 ] @@ -7941,9 +9533,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Symbolic bounds of every activation layer in terms of predecessor: Layer 2 (SOFTMAX): -0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 --0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 --0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= 0.1922 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 + 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 + -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 + -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 Symbolic bounds of output layer in terms of every layer (backsubstitution): @@ -7958,11 +9550,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Layer 1: Using -0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243. --0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481. --0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= 0.1922 x3 -0.0248 x4 + 0.0339 x5 + 0.1277: - 1 <= x9 <= 1 - -1 <= x10 <= -1 + 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243. 
+ -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + + 0.4481. -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + + 0.1277: 1 <= x9 <= 1 -1 <= x10 <= -1 Layer 0: 1 <= x9 <= 1 @@ -8015,6 +9606,12 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { 0, 0, 0, 0, 0, 0 } ), Vector( { 1, -1 } ), Vector( { 1, -1 } ) ); + + // Non-fixed activation neurons: x6 (SOFTMAX), x7 (SOFTMAX), x8 (SOFTMAX). + compareNonfixedNeurons( nlr, + Set( { NLR::NeuronIndex( 2, 0 ), + NLR::NeuronIndex( 2, 1 ), + NLR::NeuronIndex( 2, 2 ) } ) ); } { Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "er" ); @@ -8033,9 +9630,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite unsigned paramCount = nlr.getNumberOfParameters(); Vector coeffs( paramCount, 0.5 ); - // Invoke parameterised initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); /* Input ranges: @@ -8123,17 +9720,17 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite /* Layer 2: -0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 + 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 x6.lb = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232 x6.ub = 0.3843 x0 - 0.3661 x1 + 0.0183 x2 + 0.2232 x6 range: [ 0.2595, 0.2595 ] --0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4480 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 + -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4480 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 x7.lb = -0.3660 x0 - 0.4156 x1 + 0.0496 x2 + 0.6062 x7.ub = -0.3660 x0 - 0.4156 x1 + 0.0496 x2 + 0.6063 x7 range: [ 0.7054, 0.7054 ] --0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= 0.1922 x3 -0.0248 x4 + 0.0339 x5 + 0.1277 + -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 x8.lb = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707 x8.ub = -0.0182 x0 - 0.0496 x1 - 0.0678 x2 + 0.1707 x8 range: [ 0.0351, 0.0351 ] @@ -8187,9 +9784,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Symbolic bounds of every activation layer in terms of predecessor: Layer 2 (SOFTMAX): -0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 --0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 --0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= 0.1922 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 + 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 + -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 + -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 Symbolic bounds of output layer in terms of every layer (backsubstitution): @@ -8204,11 +9801,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Layer 1: Using -0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243. --0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481. 
--0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= 0.1922 x3 -0.0248 x4 + 0.0339 x5 + 0.1277: - 1 <= x9 <= 1 - -1 <= x10 <= -1 + 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243 <= x6 <= 0.1922 x3 - 0.1830 x4 - 0.0091 x5 + 0.4243. + -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + 0.4481 <= x7 <= -0.1830 x3 + 0.2078 x4 - 0.0248 x5 + + 0.4481. -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + 0.1277 <= x8 <= -0.0091 x3 - 0.0248 x4 + 0.0339 x5 + + 0.1277: 1 <= x9 <= 1 -1 <= x10 <= -1 Layer 0: 1 <= x9 <= 1 @@ -8261,6 +9857,12 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { 0, 0, 0, 0, 0, 0 } ), Vector( { 1, -1 } ), Vector( { 1, -1 } ) ); + + // Non-fixed activation neurons: x6 (SOFTMAX), x7 (SOFTMAX), x8 (SOFTMAX). + compareNonfixedNeurons( nlr, + Set( { NLR::NeuronIndex( 2, 0 ), + NLR::NeuronIndex( 2, 1 ), + NLR::NeuronIndex( 2, 2 ) } ) ); } } @@ -8284,9 +9886,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite unsigned paramCount = nlr.getNumberOfParameters(); Vector coeffs( paramCount, 0.5 ); - // Invoke parameterised initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); /* Input ranges: @@ -8367,7 +9969,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -0.0019, 0.0156 } ) ) ); TS_ASSERT( compareVectors( symbolicUb, - Vector( { 0.1154, + Vector( { 0.1155, -0.1017, -0.0138, -0.1017, @@ -8425,28 +10027,26 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Layer 2: First Sigmoid: x8 x10 x12 = softmax( x3, x5, x7 ). -0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 <= x8 <= 0.1154 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 + 0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 <= x8 <= 0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 x8.lb = 0.2310 x0 + 0.0001 x1 + 0.2310 x2 + 0.4051 x8.ub = 0.2310 x0 + 0.0000 x1 + 0.2310 x2 + 0.4050 x8 range: [ 0.8668, 0.8668 ] --0.1017 x3 + 0.1035 x5 - 0.0019 x7 + 0.3170 <= x10 <= -0.1017 x3 + 0.1036 x5 - 0.0019 x7 + 0.3170 - x10.lb = -0.2033 x0 + 0.0001 x1 - 0.2033 x2 + 0.5239 - x10.ub = -0.2033 x0 + 0.0000 x1 - 0.2033 x2 + 0.5241 - x10 range: [ 0.1173, 0.1173 ] + -0.1017 x3 + 0.1035 x5 - 0.0019 x7 + 0.3170 <= x10 <= -0.1017 x3 + 0.1036 x5 - 0.0019 x7 + + 0.3170 x10.lb = -0.2033 x0 + 0.0001 x1 - 0.2033 x2 + 0.5239 x10.ub = -0.2033 x0 + 0.0000 x1 - + 0.2033 x2 + 0.5241 x10 range: [ 0.1173, 0.1173 ] --0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 <= x12 <= -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 - x12.lb = -0.0275 x0 + 0.0001 x1 - 0.0275 x2 + 0.0708 - x12.ub = -0.0275 x0 + 0.0001 x1 - 0.0275 x2 + 0.0708 - x12 range: [ 0.0159, 0.0159 ] + -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 <= x12 <= -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + + 0.0747 x12.lb = -0.0275 x0 + 0.0001 x1 - 0.0275 x2 + 0.0708 x12.ub = -0.0275 x0 + 0.0001 x1 - + 0.0275 x2 + 0.0708 x12 range: [ 0.0159, 0.0159 ] Second Sigmoid: x9 x11 = softmax( x4, x6 ). 
-0.0177 x4 - 0.0177 x6 + 0.9114 <= x9 <= 0.0177 x4 - 0.0177 x6 + 0.9114 + 0.0177 x4 - 0.0177 x6 + 0.9114 <= x9 <= 0.0177 x4 - 0.0177 x6 + 0.9114 x9.lb = 0 x0 + 0.0354 x1 + 0.0354 x2 + 0.9114 x9.ub = 0 x0 + 0.0354 x1 + 0.0354 x2 + 0.9114 x9 range: [ 0.9820, 0.0180 ] --0.0177 x4 + 0.0177 x6 + 0.0886 <= x11 <= -0.0177 x4 + 0.0177 x6 + 0.0886 + -0.0177 x4 + 0.0177 x6 + 0.0886 <= x11 <= -0.0177 x4 + 0.0177 x6 + 0.0886 x11.lb = 0 x0 - 0.0354 x1 - 0.0354 x2 + 0.0886 x11.ub = 0 x0 - 0.0354 x1 - 0.0354 x2 + 0.0886 x11 range: [ 0.9820, 0.0180 ] @@ -8517,11 +10117,11 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Symbolic bounds of every activation layer in terms of predecessor: Layer 2 (SOFTMAX): -0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 <= x8 <= 0.1154 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 -0.0177 x4 - 0.0177 x6 + 0.9114 <= x9 <= 0.0177 x4 - 0.0177 x6 + 0.9114 --0.1017 x3 + 0.1035 x5 - 0.0019 x7 + 0.3170 <= x10 <= -0.1017 x3 + 0.1036 x5 - 0.0019 x7 + 0.3170 --0.0177 x4 + 0.0177 x6 + 0.0886 <= x11 <= -0.0177 x4 + 0.0177 x6 + 0.0886 --0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 <= x12 <= -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 + 0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 <= x8 <= 0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 + 0.0177 x4 - 0.0177 x6 + 0.9114 <= x9 <= 0.0177 x4 - 0.0177 x6 + 0.9114 + -0.1017 x3 + 0.1035 x5 - 0.0019 x7 + 0.3170 <= x10 <= -0.1017 x3 + 0.1036 x5 - 0.0019 x7 + + 0.3170 -0.0177 x4 + 0.0177 x6 + 0.0886 <= x11 <= -0.0177 x4 + 0.0177 x6 + 0.0886 -0.0138 x3 - + 0.0019 x5 + 0.0156 x7 + 0.0747 <= x12 <= -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 Symbolic bounds of output layer in terms of every layer (backsubstitution): @@ -8540,15 +10140,12 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Layer 1: Using -0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 <= x8 <= 0.1154 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 -0.0177 x4 - 0.0177 x6 + 0.9114 <= x9 <= 0.0177 x4 - 0.0177 x6 + 0.9114 --0.1017 x3 + 0.1035 x5 - 0.0019 x7 + 0.3170 <= x10 <= -0.1017 x3 + 0.1036 x5 - 0.0019 x7 + 0.3170 --0.0177 x4 + 0.0177 x6 + 0.0886 <= x11 <= -0.0177 x4 + 0.0177 x6 + 0.0886 --0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 <= x12 <= -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 - 1 <= x13 <= 1 - -1 <= x14 <= -1 - 1 <= x15 <= 1 - -1 <= x16 <= -1 + 0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 <= x8 <= 0.1155 x3 - 0.1017 x5 - 0.0138 x7 + 0.6084 + 0.0177 x4 - 0.0177 x6 + 0.9114 <= x9 <= 0.0177 x4 - 0.0177 x6 + 0.9114 + -0.1017 x3 + 0.1035 x5 - 0.0019 x7 + 0.3170 <= x10 <= -0.1017 x3 + 0.1036 x5 - 0.0019 x7 + + 0.3170 -0.0177 x4 + 0.0177 x6 + 0.0886 <= x11 <= -0.0177 x4 + 0.0177 x6 + 0.0886 -0.0138 x3 - + 0.0019 x5 + 0.0156 x7 + 0.0747 <= x12 <= -0.0138 x3 - 0.0019 x5 + 0.0156 x7 + 0.0747 1 <= x13 <= + 1 -1 <= x14 <= -1 1 <= x15 <= 1 -1 <= x16 <= -1 Layer 0: 1 <= x13 <= 1 @@ -8559,14 +10156,36 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite comparePredecessorSymbolicBounds( nlr, 2, - Vector( { 0.1155, 0.0000, -0.1017, 0.0000, -0.0138, 0.0000, 0.0177, - 0.0000, -0.0177, 0.0000, -0.1017, 0.0000, 0.1035, 0.0000, - -0.0019, 0.0000, -0.0177, 0.0000, 0.0177, 0.0000, -0.0138, - 0.0000, -0.0019, 0.0000, 0.0156 } ), - Vector( { 0.1155, 0.0000, -0.1017, 0.0000, -0.0138, 0.0000, 0.0177, - 0.0000, -0.0177, 0.0000, -0.1017, 0.0000, 0.1035, 0.0000, - -0.0019, 0.0000, -0.0177, 0.0000, 0.0177, 0.0000, -0.0138, - 0.0000, -0.0019, 0.0000, 0.0156 } ), + Vector( { 0.1155, + 0.0177, + -0.1017, + -0.0177, + -0.0138, + -0.1017, + -0.0177, + 0.1035, + 0.0177, + -0.0019, + -0.0138, + 0.0000, + 
-0.0019, + 0.0000, + 0.0156 } ), + Vector( { 0.1155, + 0.0177, + -0.1017, + -0.0177, + -0.0138, + -0.1017, + -0.0177, + 0.1036, + 0.0177, + -0.0019, + -0.0138, + 0.0000, + -0.0019, + 0.0000, + 0.0156 } ), Vector( { 0.6084, 0.9114, 0.3170, 0.0886, 0.0747 } ), Vector( { 0.6084, 0.9114, 0.3170, 0.0886, 0.0747 } ) ); @@ -8596,10 +10215,21 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( 12, 0 ), Vector( { 1, -1, 1, -1 } ), Vector( { 1, -1, 1, -1 } ) ); + + // Non-fixed activation neurons: x8 (SOFTMAX), x9 (SOFTMAX), x10 (SOFTMAX), x11 (SOFTMAX), + // x12 (SOFTMAX). + compareNonfixedNeurons( nlr, + Set( { NLR::NeuronIndex( 2, 0 ), + NLR::NeuronIndex( 2, 1 ), + NLR::NeuronIndex( 2, 2 ), + NLR::NeuronIndex( 2, 3 ), + NLR::NeuronIndex( 2, 4 ) } ) ); } void test_parameterised_symbolic_bound_maps_bilinear() { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -8613,9 +10243,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite unsigned paramCount = nlr.getNumberOfParameters(); Vector coeffs( paramCount, 0.5 ); - // Invoke parameterised initializeSymbolicBoundsMaps + // Invoke Parameterised DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.initializeSymbolicBoundsMaps( coeffs ) ); + TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true, coeffs ) ); /* Input ranges: @@ -8685,7 +10315,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Symbolic bounds of output layer in terms of every layer (backsubstitution): Layer 3: - x4 <= x5 <= x4 + x5 <= x5 <= x5 Layer 2: Using x5 = -x4: @@ -8731,6 +10361,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Vector( { -3.5, -0.5 } ), Vector( { -4.5 } ), Vector( { 9.5 } ) ); + + // Non-fixed activation neurons: x4 (BILINEAR). 
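
One pattern worth making explicit for the softmax tests above: the asserted per-neuron coefficients match the softmax Jacobian at the linearization point, d s_i / d x_j = s_i * ( delta_ij - s_j ). For example, 0.1922 = 0.2595 * ( 1 - 0.2595 ) and -0.1830 = -0.2595 * 0.7054 in the three-neuron test, and 0.1155 = 0.8668 * ( 1 - 0.8668 ) in the five-neuron test. A sketch of that entry (illustrative helper):

    double softmaxJacobianEntry( const Vector<double> &s, unsigned i, unsigned j )
    {
        double delta = ( i == j ) ? 1.0 : 0.0;
        return s[i] * ( delta - s[j] );
    }
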
+        compareNonfixedNeurons( nlr, Set<NLR::NeuronIndex>( { NLR::NeuronIndex( 2, 0 ) } ) );
     }
 
     bool boundsEqual( const List<Tightening> &bounds, const List<Tightening> &expectedBounds )
@@ -8772,10 +10405,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
 
     void compareOutputSymbolicBounds( NLR::NetworkLevelReasoner &nlr,
                                       unsigned layerIndex,
-                                      const Vector<double> &symbolicLb,
-                                      const Vector<double> &symbolicUb,
-                                      const Vector<double> &symbolicLowerBias,
-                                      const Vector<double> &symbolicUpperBias )
+                                      const Vector<double> &expectedSymbolicLb,
+                                      const Vector<double> &expectedSymbolicUb,
+                                      const Vector<double> &expectedSymbolicLowerBias,
+                                      const Vector<double> &expectedSymbolicUpperBias )
     {
         Vector<double> outputSymbolicLb;
         Vector<double> outputSymbolicUb;
@@ -8787,18 +10420,18 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
                                       nlr.getOutputSymbolicLowerBias( layerIndex ) );
         TS_ASSERT_THROWS_NOTHING( outputSymbolicUpperBias =
                                       nlr.getOutputSymbolicUpperBias( layerIndex ) );
-        TS_ASSERT( compareVectors( outputSymbolicLb, symbolicLb ) );
-        TS_ASSERT( compareVectors( outputSymbolicUb, symbolicUb ) );
-        TS_ASSERT( compareVectors( outputSymbolicLowerBias, symbolicLowerBias ) );
-        TS_ASSERT( compareVectors( outputSymbolicUpperBias, symbolicUpperBias ) );
+        TS_ASSERT( compareVectors( outputSymbolicLb, expectedSymbolicLb ) );
+        TS_ASSERT( compareVectors( outputSymbolicUb, expectedSymbolicUb ) );
+        TS_ASSERT( compareVectors( outputSymbolicLowerBias, expectedSymbolicLowerBias ) );
+        TS_ASSERT( compareVectors( outputSymbolicUpperBias, expectedSymbolicUpperBias ) );
     }
 
     void comparePredecessorSymbolicBounds( NLR::NetworkLevelReasoner &nlr,
                                            unsigned layerIndex,
-                                           const Vector<double> &symbolicLb,
-                                           const Vector<double> &symbolicUb,
-                                           const Vector<double> &symbolicLowerBias,
-                                           const Vector<double> &symbolicUpperBias )
+                                           const Vector<double> &expectedSymbolicLb,
+                                           const Vector<double> &expectedSymbolicUb,
+                                           const Vector<double> &expectedSymbolicLowerBias,
+                                           const Vector<double> &expectedSymbolicUpperBias )
     {
         Vector<double> predecessorSymbolicLb;
         Vector<double> predecessorSymbolicUb;
@@ -8812,23 +10445,79 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
                                       nlr.getPredecessorSymbolicLowerBias( layerIndex ) );
         TS_ASSERT_THROWS_NOTHING( predecessorSymbolicUpperBias =
                                       nlr.getPredecessorSymbolicUpperBias( layerIndex ) );
-        TS_ASSERT( compareVectors( predecessorSymbolicLb, symbolicLb ) );
-        TS_ASSERT( compareVectors( predecessorSymbolicUb, symbolicUb ) );
-        TS_ASSERT( compareVectors( predecessorSymbolicLowerBias, symbolicLowerBias ) );
-        TS_ASSERT( compareVectors( predecessorSymbolicUpperBias, symbolicUpperBias ) );
+        TS_ASSERT( compareVectors( predecessorSymbolicLb, expectedSymbolicLb ) );
+        TS_ASSERT( compareVectors( predecessorSymbolicUb, expectedSymbolicUb ) );
+        TS_ASSERT( compareVectors( predecessorSymbolicLowerBias, expectedSymbolicLowerBias ) );
+        TS_ASSERT( compareVectors( predecessorSymbolicUpperBias, expectedSymbolicUpperBias ) );
     }
 
-    void comparePMNRScores( NLR::NetworkLevelReasoner &nlr,
-                            const Map<NLR::NeuronIndex, double> &neuronScores )
+    void compareBranchSymbolicBounds( NLR::NetworkLevelReasoner &nlr,
+                                      NLR::NeuronIndex index,
+                                      const Vector<double> &expectedSymbolicLb,
+                                      const Vector<double> &expectedSymbolicUb,
+                                      const Vector<double> &expectedSymbolicLowerBias,
+                                      const Vector<double> &expectedSymbolicUpperBias )
+    {
+        Vector<double> branchSymbolicLb;
+        Vector<double> branchSymbolicUb;
+        Vector<double> branchSymbolicLowerBias;
+        Vector<double> branchSymbolicUpperBias;
+        TS_ASSERT_THROWS_NOTHING( branchSymbolicLb = nlr.getSymbolicLbPerBranch( index ) );
+        TS_ASSERT_THROWS_NOTHING( branchSymbolicUb = nlr.getSymbolicUbPerBranch( index ) );
+        TS_ASSERT_THROWS_NOTHING( branchSymbolicLowerBias =
+                                      nlr.getSymbolicLowerBiasPerBranch( index ) );
+        TS_ASSERT_THROWS_NOTHING( branchSymbolicUpperBias =
+                                      nlr.getSymbolicUpperBiasPerBranch( index ) );
+        TS_ASSERT( compareVectors( branchSymbolicLb, expectedSymbolicLb ) );
+        TS_ASSERT( compareVectors( branchSymbolicUb, expectedSymbolicUb ) );
+        TS_ASSERT( compareVectors( branchSymbolicLowerBias, expectedSymbolicLowerBias ) );
+        TS_ASSERT( compareVectors( branchSymbolicUpperBias, expectedSymbolicUpperBias ) );
+    }
+
+    void compareNonfixedNeurons( NLR::NetworkLevelReasoner &nlr,
+                                 const Set<NLR::NeuronIndex> &expectedIndices )
     {
-        double PMNRScore;
-        for ( const auto &pair : neuronScores )
+        Set<NLR::NeuronIndex> indices;
+        for ( const auto &pair : nlr.getLayerIndexToLayer() )
+        {
+            Vector<NLR::NeuronIndex> nonfixedNeurons;
+            TS_ASSERT_THROWS_NOTHING( nonfixedNeurons = pair.second->getNonfixedNeurons() );
+            for ( const auto index : nonfixedNeurons )
+            {
+                indices.insert( index );
+            }
+        }
+
+        TS_ASSERT_EQUALS( indices.size(), expectedIndices.size() );
+        for ( const auto index : indices )
         {
-            TS_ASSERT_THROWS_NOTHING( PMNRScore = nlr.getPMNRScore( pair.first ) );
-            TS_ASSERT( FloatUtils::areEqual( PMNRScore, pair.second, 0.0001 ) );
+            TS_ASSERT( expectedIndices.exists( index ) );
         }
     }
 
+    void
+    compareBBPSBranchingPoints( NLR::NetworkLevelReasoner &nlr,
+                                NLR::NeuronIndex index,
+                                const std::pair<NLR::NeuronIndex, double> &expectedBranchingPoint )
+    {
+        std::pair<NLR::NeuronIndex, double> point;
+        TS_ASSERT_THROWS_NOTHING( point = nlr.getBBPSBranchingPoint( index ) );
+        TS_ASSERT( FloatUtils::areEqual(
+            point.first._layer, expectedBranchingPoint.first._layer, 0.0001 ) );
+        TS_ASSERT( FloatUtils::areEqual(
+            point.first._neuron, expectedBranchingPoint.first._neuron, 0.0001 ) );
+        TS_ASSERT( FloatUtils::areEqual( point.second, expectedBranchingPoint.second, 0.0001 ) );
+    }
+
+    void comparePMNRScores( NLR::NetworkLevelReasoner &nlr,
+                            NLR::NeuronIndex index,
+                            double expectedScore )
+    {
+        double score = 0;
+        TS_ASSERT_THROWS_NOTHING( score = nlr.getPMNRScore( index ) );
+        TS_ASSERT( FloatUtils::areEqual( score, expectedScore, 0.0001 ) );
+    }
+
     bool compareVectors( const Vector<double> &vectorA, const Vector<double> &vectorB )
     {
         if ( vectorA.size() != vectorB.size() )
diff --git a/src/proofs/Checker.cpp b/src/proofs/Checker.cpp
index c153de7feb..afc2e98a3e 100644
--- a/src/proofs/Checker.cpp
+++ b/src/proofs/Checker.cpp
@@ -211,6 +211,12 @@ bool Checker::checkAllPLCExplanations( const UnsatCertificateNode *node, double
     // NOTE, this will change as PLCLemma will
     for ( const auto &plcLemma : node->getPLCLemmas() )
     {
+        if ( !plcLemma )
+            continue;
+
+        DEBUG(
+            ASSERT( !GlobalConfiguration::ANALYZE_PROOF_DEPENDENCIES || plcLemma->getToCheck() ) );
+
         PiecewiseLinearConstraint *matchedConstraint = NULL;
         Tightening::BoundType affectedVarBound = plcLemma->getAffectedVarBound();
         double explainedBound = affectedVarBound == Tightening::UB ?
FloatUtils::infinity() diff --git a/src/proofs/PlcLemma.cpp b/src/proofs/PlcLemma.cpp index c4edcca92d..796f90b1a9 100644 --- a/src/proofs/PlcLemma.cpp +++ b/src/proofs/PlcLemma.cpp @@ -14,19 +14,24 @@ #include "PlcLemma.h" +#include "Debug.h" + PLCLemma::PLCLemma( const List &causingVars, unsigned affectedVar, double bound, Tightening::BoundType causingVarBound, Tightening::BoundType affectedVarBound, const Vector &explanations, - PiecewiseLinearFunctionType constraintType ) + PiecewiseLinearFunctionType constraintType, + double minTargetBound ) : _causingVars( causingVars ) , _affectedVar( affectedVar ) , _bound( bound ) , _causingVarBound( causingVarBound ) , _affectedVarBound( affectedVarBound ) , _constraintType( constraintType ) + , _toCheck( false ) + , _minTargetBound( minTargetBound ) { if ( explanations.empty() ) _explanations = List(); @@ -96,3 +101,18 @@ PiecewiseLinearFunctionType PLCLemma::getConstraintType() const { return _constraintType; } + +bool PLCLemma::getToCheck() const +{ + return _toCheck; +} + +double PLCLemma::getMinTargetBound() const +{ + return _minTargetBound; +} + +void PLCLemma::setToCheck() +{ + _toCheck = true; +} diff --git a/src/proofs/PlcLemma.h b/src/proofs/PlcLemma.h index 909752489e..10884888bb 100644 --- a/src/proofs/PlcLemma.h +++ b/src/proofs/PlcLemma.h @@ -15,7 +15,6 @@ #ifndef __PlcExplanation_h__ #define __PlcExplanation_h__ -#include "PiecewiseLinearConstraint.h" #include "PiecewiseLinearFunctionType.h" #include "SparseUnsortedList.h" #include "Tightening.h" @@ -33,7 +32,8 @@ class PLCLemma Tightening::BoundType causingVarBound, Tightening::BoundType affectedVarBound, const Vector &explanation, - PiecewiseLinearFunctionType constraintType ); + PiecewiseLinearFunctionType constraintType, + double minTargetBound ); ~PLCLemma(); @@ -47,6 +47,10 @@ class PLCLemma Tightening::BoundType getAffectedVarBound() const; const List &getExplanations() const; PiecewiseLinearFunctionType getConstraintType() const; + bool getToCheck() const; + double getMinTargetBound() const; + + void setToCheck(); private: const List _causingVars; @@ -56,6 +60,8 @@ class PLCLemma Tightening::BoundType _affectedVarBound; List _explanations; PiecewiseLinearFunctionType _constraintType; + bool _toCheck; + double _minTargetBound; }; #endif //__PlcExplanation_h__ diff --git a/src/proofs/UnsatCertificateNode.cpp b/src/proofs/UnsatCertificateNode.cpp index 2d1c094509..f42c7f3263 100644 --- a/src/proofs/UnsatCertificateNode.cpp +++ b/src/proofs/UnsatCertificateNode.cpp @@ -158,3 +158,11 @@ bool UnsatCertificateNode::isValidNonLeaf() const { return !_contradiction && !_children.empty(); } + +void UnsatCertificateNode::deleteUnusedLemmas() +{ + if ( GlobalConfiguration::ANALYZE_PROOF_DEPENDENCIES ) + for ( auto &lemma : _PLCExplanations ) + if ( lemma && !lemma->getToCheck() ) + lemma = nullptr; +} diff --git a/src/proofs/UnsatCertificateNode.h b/src/proofs/UnsatCertificateNode.h index 42d5048086..9963f85f8c 100644 --- a/src/proofs/UnsatCertificateNode.h +++ b/src/proofs/UnsatCertificateNode.h @@ -128,6 +128,11 @@ class UnsatCertificateNode */ bool isValidNonLeaf() const; + /* + Deletes all unused lemmas + */ + void deleteUnusedLemmas(); + private: List _children; UnsatCertificateNode *_parent; diff --git a/src/proofs/UnsatCertificateUtils.cpp b/src/proofs/UnsatCertificateUtils.cpp index 88f4569451..f540859629 100644 --- a/src/proofs/UnsatCertificateUtils.cpp +++ b/src/proofs/UnsatCertificateUtils.cpp @@ -24,9 +24,6 @@ double UNSATCertificateUtils::computeBound( unsigned var, { 
ASSERT( var < numberOfVariables ); - double derivedBound = 0; - double temp; - if ( explanation.empty() ) return isUpper ? groundUpperBounds[var] : groundLowerBounds[var]; @@ -47,31 +44,17 @@ double UNSATCertificateUtils::computeBound( unsigned var, UNSATCertificateUtils::getExplanationRowCombination( var, explanation, explanationRowCombination, initialTableau, numberOfVariables ); - // Set the bound derived from the linear combination, using original bounds. - for ( unsigned i = 0; i < numberOfVariables; ++i ) - { - temp = explanationRowCombination[i]; - if ( !FloatUtils::isZero( temp ) ) - { - if ( isUpper ) - temp *= FloatUtils::isPositive( explanationRowCombination[i] ) - ? groundUpperBounds[i] - : groundLowerBounds[i]; - else - temp *= FloatUtils::isPositive( explanationRowCombination[i] ) - ? groundLowerBounds[i] - : groundUpperBounds[i]; - - if ( !FloatUtils::isZero( temp ) ) - derivedBound += temp; - } - } - - return derivedBound; + return isUpper ? computeCombinationUpperBound( explanationRowCombination, + groundUpperBounds, + groundLowerBounds, + numberOfVariables ) + : computeCombinationLowerBound( explanationRowCombination, + groundUpperBounds, + groundLowerBounds, + numberOfVariables ); } -void UNSATCertificateUtils::getExplanationRowCombination( unsigned var, - const SparseUnsortedList &explanation, +void UNSATCertificateUtils::getExplanationRowCombination( const SparseUnsortedList &explanation, Vector &explanationRowCombination, const SparseMatrix *initialTableau, unsigned numberOfVariables ) @@ -100,7 +83,16 @@ void UNSATCertificateUtils::getExplanationRowCombination( unsigned var, if ( FloatUtils::isZero( explanationRowCombination[i] ) ) explanationRowCombination[i] = 0; } +} +void UNSATCertificateUtils::getExplanationRowCombination( unsigned var, + const SparseUnsortedList &explanation, + Vector &explanationRowCombination, + const SparseMatrix *initialTableau, + unsigned numberOfVariables ) +{ + UNSATCertificateUtils::getExplanationRowCombination( + explanation, explanationRowCombination, initialTableau, numberOfVariables ); // Since: 0 = Sum (ci * xi) + c * var = Sum (ci * xi) + (c + 1) * var - var // We have: var = Sum (ci * xi) + (c + 1) * var ++explanationRowCombination[var]; @@ -131,17 +123,31 @@ double UNSATCertificateUtils::computeCombinationUpperBound( const SparseUnsorted } } + return computeCombinationUpperBound( + explanationRowCombination, groundUpperBounds, groundLowerBounds, numberOfVariables ); +} + +const Set UNSATCertificateUtils::getSupportedActivations() +{ + return { RELU, SIGN, ABSOLUTE_VALUE, MAX, DISJUNCTION, LEAKY_RELU }; +} + +double UNSATCertificateUtils::computeCombinationUpperBound( const Vector &combination, + const double *groundUpperBounds, + const double *groundLowerBounds, + unsigned numberOfVariables ) +{ double derivedBound = 0; double temp; // Set the bound derived from the linear combination, using original bounds. for ( unsigned i = 0; i < numberOfVariables; ++i ) { - temp = explanationRowCombination[i]; + temp = combination[i]; if ( !FloatUtils::isZero( temp ) ) { - temp *= FloatUtils::isPositive( explanationRowCombination[i] ) ? groundUpperBounds[i] - : groundLowerBounds[i]; + temp *= FloatUtils::isPositive( combination[i] ) ? 
groundUpperBounds[i] + : groundLowerBounds[i]; if ( !FloatUtils::isZero( temp ) ) derivedBound += temp; @@ -151,7 +157,27 @@ double UNSATCertificateUtils::computeCombinationUpperBound( const SparseUnsorted return derivedBound; } -const Set UNSATCertificateUtils::getSupportedActivations() +double UNSATCertificateUtils::computeCombinationLowerBound( const Vector &combination, + const double *groundUpperBounds, + const double *groundLowerBounds, + unsigned numberOfVariables ) { - return { RELU, SIGN, ABSOLUTE_VALUE, MAX, DISJUNCTION, LEAKY_RELU }; + double derivedBound = 0; + double temp; + + // Set the bound derived from the linear combination, using original bounds. + for ( unsigned i = 0; i < numberOfVariables; ++i ) + { + temp = combination[i]; + if ( !FloatUtils::isZero( temp ) ) + { + temp *= FloatUtils::isNegative( combination[i] ) ? groundUpperBounds[i] + : groundLowerBounds[i]; + + if ( !FloatUtils::isZero( temp ) ) + derivedBound += temp; + } + } + + return derivedBound; } diff --git a/src/proofs/UnsatCertificateUtils.h b/src/proofs/UnsatCertificateUtils.h index c599758c7c..39446e3638 100644 --- a/src/proofs/UnsatCertificateUtils.h +++ b/src/proofs/UnsatCertificateUtils.h @@ -36,6 +36,13 @@ class UNSATCertificateUtils const double *groundLowerBounds, unsigned numberOfVariables ); + /* + Given a tableau and a column vector, create a linear combination based on the vector + */ + static void getExplanationRowCombination( const SparseUnsortedList &explanation, + Vector &explanationRowCombination, + const SparseMatrix *initialTableau, + unsigned numberOfVariables ); /* Given a var, a tableau and a column vector, create a linear combination used to explain a bound @@ -52,6 +59,16 @@ class UNSATCertificateUtils const double *groundLowerBounds, unsigned numberOfVariables ); + static double computeCombinationUpperBound( const Vector &combination, + const double *groundUpperBounds, + const double *groundLowerBounds, + unsigned numberOfVariables ); + + static double computeCombinationLowerBound( const Vector &combination, + const double *groundUpperBounds, + const double *groundLowerBounds, + unsigned numberOfVariables ); + static const Set getSupportedActivations(); }; diff --git a/src/proofs/tests/Test_UnsatCertificateNode.h b/src/proofs/tests/Test_UnsatCertificateNode.h index 75a1d250a2..ca18bc8608 100644 --- a/src/proofs/tests/Test_UnsatCertificateNode.h +++ b/src/proofs/tests/Test_UnsatCertificateNode.h @@ -83,11 +83,11 @@ class UnsatCertificateNodeTestSuite : public CxxTest::TestSuite Vector emptyVec; auto explanation1 = std::shared_ptr( - new PLCLemma( { 1 }, 1, 0, Tightening::UB, Tightening::UB, emptyVec, RELU ) ); + new PLCLemma( { 1 }, 1, 0, Tightening::UB, Tightening::UB, emptyVec, RELU, 0 ) ); auto explanation2 = std::shared_ptr( - new PLCLemma( { 1 }, 1, -1, Tightening::UB, Tightening::UB, emptyVec, RELU ) ); + new PLCLemma( { 1 }, 1, -1, Tightening::UB, Tightening::UB, emptyVec, RELU, 0 ) ); auto explanation3 = std::shared_ptr( - new PLCLemma( { 1 }, 1, -4, Tightening::UB, Tightening::UB, emptyVec, RELU ) ); + new PLCLemma( { 1 }, 1, -4, Tightening::UB, Tightening::UB, emptyVec, RELU, 0 ) ); TS_ASSERT( root.getPLCLemmas().empty() ); From b87585193880b669120db8020bf1d289b58425b9 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 16 Oct 2025 21:03:22 +0300 Subject: [PATCH 78/81] Fix CHANGELOG.md. 
---
 CHANGELOG.md | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index d6dfec3a49..04050e6007 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,12 +8,9 @@
 - Implemented backward analysis using preimage-approximation algorithm for `Relu`, `LeakyRelu`, `Sign` and `Bilinear` Layers.
 - Added the BaBSR heuristic as a new branching strategy for ReLU Splitting
 - Support Sub of two variables, "Mul" of two constants, Slice, and ConstantOfShape in the python onnx parser
-<<<<<<< HEAD
+- Renamed SmtCore module to SearchTreeHandler
 - Implemented backward analysis using INVPROP algorithm with added support for all activation functions.
 - Implemented backward analysis using partial multi-neuron relaxation with BBPS-based heuristic for neuron selection.
-=======
->>>>>>> upstream/master
-- Renamed SmtCore module to SearchTreeHandler

 ## Version 2.0.0

From 8af0d347f20425756fd89da0d480712ba97d87ce Mon Sep 17 00:00:00 2001
From: ido-shm-uel
Date: Sun, 19 Oct 2025 17:16:40 +0300
Subject: [PATCH 79/81] Remove unused constant.

---
 src/configuration/GlobalConfiguration.cpp | 3 +--
 src/configuration/GlobalConfiguration.h   | 5 +----
 2 files changed, 2 insertions(+), 6 deletions(-)

diff --git a/src/configuration/GlobalConfiguration.cpp b/src/configuration/GlobalConfiguration.cpp
index aa5429355e..32bc949aab 100644
--- a/src/configuration/GlobalConfiguration.cpp
+++ b/src/configuration/GlobalConfiguration.cpp
@@ -84,7 +84,6 @@ const double GlobalConfiguration::INVPROP_INITIAL_ALPHA = 0.5;
 const double GlobalConfiguration::INVPROP_INITIAL_GAMMA = 0.025;

 const unsigned GlobalConfiguration::PMNR_RANDOM_SEED = 1;
-const unsigned GlobalConfiguration::PMNR_MAX_ITERATIONS = 1;
 const unsigned GlobalConfiguration::PMNR_SELECTED_NEURONS = 2;
 const unsigned GlobalConfiguration::PMNR_BBPS_BRANCHING_CANDIDATES = 10;
@@ -138,7 +137,7 @@ const bool GlobalConfiguration::WRITE_JSON_PROOF = false;

 const unsigned GlobalConfiguration::BACKWARD_BOUND_PROPAGATION_DEPTH = 3;
 const unsigned GlobalConfiguration::MAX_ROUNDS_OF_BACKWARD_ANALYSIS = 10;
-const unsigned GlobalConfiguration::MAX_ROUNDS_OF_PMNR_BACKWARD_ANALYSIS = 10;
+const unsigned GlobalConfiguration::MAX_ROUNDS_OF_PMNR_BACKWARD_ANALYSIS = 1;

 const bool GlobalConfiguration::ANALYZE_PROOF_DEPENDENCIES = false;
 const bool GlobalConfiguration::MINIMIZE_PROOF_DEPENDENCIES = false;

diff --git a/src/configuration/GlobalConfiguration.h b/src/configuration/GlobalConfiguration.h
index d31c9cc874..ac7d7f7f97 100644
--- a/src/configuration/GlobalConfiguration.h
+++ b/src/configuration/GlobalConfiguration.h
@@ -193,10 +193,7 @@ class GlobalConfiguration
     // Random seed for PMNR (with randomized hyperplanes).
     static const unsigned PMNR_RANDOM_SEED;

-    // Random seed for PMNR (with randomized hyperplanes).
-    static const unsigned PMNR_MAX_ITERATIONS;
-
-    // Random seed for PMNR (with heuristically selected hyperplanes).
+    // Number of selected neurons for PMNR (with heuristically selected hyperplanes).
     static const unsigned PMNR_SELECTED_NEURONS;

     // Number of candidates for PMNR-BBPS branching points.

From 2184a86d5a41ac1039e246cc0c48f19d818c786d Mon Sep 17 00:00:00 2001
From: ido-shm-uel
Date: Fri, 21 Nov 2025 12:24:37 +0200
Subject: [PATCH 80/81] Results of PMNR Optimization Benchmarks:

1. BBPS heuristic final calculation modified.
2. Alpha optimization removed from EXTENDED INVPROP.
3. PreimageApproximation is run before EXTENDED INVPROP.
4. BBPS branching candidates number increased to 100.
5. Removed milp-tightening options backward-invprop, backward-pmnr-random,
   backward-pmnr-gradient; renamed backward-pmnr-bbps to backward-pmnr.
6. Updated test files Test_PMNRSelection.h, Test_LPRelaxation.h, Test_PMNR.h.
---
 src/configuration/GlobalConfiguration.cpp  |    5 +-
 src/configuration/GlobalConfiguration.h    |    3 -
 src/configuration/OptionParser.cpp         |    6 +-
 src/configuration/Options.cpp              |   10 +-
 src/engine/Engine.cpp                      |   19 +-
 src/engine/MILPSolverBoundTighteningType.h |   15 +-
 src/nlr/Layer.cpp                          |    8 +-
 src/nlr/NetworkLevelReasoner.cpp           |  342 +----
 src/nlr/NetworkLevelReasoner.h             |    9 +-
 src/nlr/tests/Test_LPRelaxation.h          |   83 +-
 src/nlr/tests/Test_PMNR.h                  | 1380 ++------------------
 src/nlr/tests/Test_PMNRSelection.h         | 1007 +++-----------
 12 files changed, 431 insertions(+), 2456 deletions(-)

diff --git a/src/configuration/GlobalConfiguration.cpp b/src/configuration/GlobalConfiguration.cpp
index 32bc949aab..9ed8853b39 100644
--- a/src/configuration/GlobalConfiguration.cpp
+++ b/src/configuration/GlobalConfiguration.cpp
@@ -80,12 +80,11 @@ const unsigned GlobalConfiguration::INVPROP_MAX_ITERATIONS = 10;
 const double GlobalConfiguration::INVPROP_STEP_SIZE = 0.025;
 const double GlobalConfiguration::INVPROP_LEARNING_RATE = 0.005;
 const double GlobalConfiguration::INVPROP_WEIGHT_DECAY = 0.5;
-const double GlobalConfiguration::INVPROP_INITIAL_ALPHA = 0.5;
 const double GlobalConfiguration::INVPROP_INITIAL_GAMMA = 0.025;

 const unsigned GlobalConfiguration::PMNR_RANDOM_SEED = 1;
 const unsigned GlobalConfiguration::PMNR_SELECTED_NEURONS = 2;
-const unsigned GlobalConfiguration::PMNR_BBPS_BRANCHING_CANDIDATES = 10;
+const unsigned GlobalConfiguration::PMNR_BBPS_BRANCHING_CANDIDATES = 100;

 const bool GlobalConfiguration::USE_HARRIS_RATIO_TEST = true;
@@ -137,7 +136,7 @@ const bool GlobalConfiguration::WRITE_JSON_PROOF = false;

 const unsigned GlobalConfiguration::BACKWARD_BOUND_PROPAGATION_DEPTH = 3;
 const unsigned GlobalConfiguration::MAX_ROUNDS_OF_BACKWARD_ANALYSIS = 10;
-const unsigned GlobalConfiguration::MAX_ROUNDS_OF_PMNR_BACKWARD_ANALYSIS = 1;
+const unsigned GlobalConfiguration::MAX_ROUNDS_OF_PMNR_BACKWARD_ANALYSIS = 10;

 const bool GlobalConfiguration::ANALYZE_PROOF_DEPENDENCIES = false;
 const bool GlobalConfiguration::MINIMIZE_PROOF_DEPENDENCIES = false;

diff --git a/src/configuration/GlobalConfiguration.h b/src/configuration/GlobalConfiguration.h
index ac7d7f7f97..8cf91a02b6 100644
--- a/src/configuration/GlobalConfiguration.h
+++ b/src/configuration/GlobalConfiguration.h
@@ -184,9 +184,6 @@ class GlobalConfiguration
     // Weight decay for INVPROP optimization.
     static const double INVPROP_WEIGHT_DECAY;

-    // Initial alpha values for INVPROP optimization.
-    static const double INVPROP_INITIAL_ALPHA;
-
     // Initial gamma values for INVPROP optimization.
     static const double INVPROP_INITIAL_GAMMA;

diff --git a/src/configuration/OptionParser.cpp b/src/configuration/OptionParser.cpp
index fd5a0916b5..73efed041b 100644
--- a/src/configuration/OptionParser.cpp
+++ b/src/configuration/OptionParser.cpp
@@ -267,10 +267,8 @@ void OptionParser::initialize()
               &( ( *_stringOptions )[Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE] ) )
               ->default_value( ( *_stringOptions )[Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE] ),
           "The MILP solver bound tightening type: "
-          "lp/backward-once/backward-converge/backward-preimage-approx/backward-invprop/"
-          "backward-pmnr-random/"
-          "backward-pmnr-gradient/backward-pmnr-bbps/lp-inc/milp/milp-inc/"
-          "iter-prop/none." )
) + "lp/backward-once/backward-converge/backward-preimage-approx/backward-pmnr/lp-inc/milp/" + "milp-inc/iter-prop/none." ) #endif ; diff --git a/src/configuration/Options.cpp b/src/configuration/Options.cpp index 8e9a4b5f87..d9fa7b0b3b 100644 --- a/src/configuration/Options.cpp +++ b/src/configuration/Options.cpp @@ -211,14 +211,8 @@ MILPSolverBoundTighteningType Options::getMILPSolverBoundTighteningType() const return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE; if ( strategyString == "backward-preimage-approx" ) return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX; - if ( strategyString == "backward-invprop" ) - return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP; - if ( strategyString == "backward-pmnr-random" ) - return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM; - if ( strategyString == "backward-pmnr-gradient" ) - return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT; - if ( strategyString == "backward-pmnr-bbps" ) - return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS; + if ( strategyString == "backward-pmnr" ) + return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR; else if ( strategyString == "milp" ) return MILPSolverBoundTighteningType::MILP_ENCODING; else if ( strategyString == "milp-inc" ) diff --git a/src/engine/Engine.cpp b/src/engine/Engine.cpp index 353b8a9bb3..19030f1326 100644 --- a/src/engine/Engine.cpp +++ b/src/engine/Engine.cpp @@ -1598,10 +1598,7 @@ void Engine::performMILPSolverBoundedTightening( Query *inputQuery ) case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX: - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP: - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM: - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT: - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR: _networkLevelReasoner->lpRelaxationPropagation(); break; case MILPSolverBoundTighteningType::MILP_ENCODING: @@ -1676,14 +1673,7 @@ void Engine::performAdditionalBackwardAnalysisIfNeeded() printf( "Backward analysis tightened %u bounds\n", tightened ); } - if ( _milpSolverBoundTighteningType == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP || - _milpSolverBoundTighteningType == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM || - _milpSolverBoundTighteningType == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT || - _milpSolverBoundTighteningType == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS ) + if ( _milpSolverBoundTighteningType == MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR ) { unsigned tightened = performSymbolicBoundTightening( &( *_preprocessedQuery ) ); if ( _verbosity > 0 ) @@ -1724,10 +1714,7 @@ void Engine::performMILPSolverBoundedTighteningForSingleLayer( unsigned targetIn case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX: - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP: - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM: - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT: - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS: + case 
MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR: case MILPSolverBoundTighteningType::ITERATIVE_PROPAGATION: case MILPSolverBoundTighteningType::NONE: return; diff --git a/src/engine/MILPSolverBoundTighteningType.h b/src/engine/MILPSolverBoundTighteningType.h index a723455be6..42be276767 100644 --- a/src/engine/MILPSolverBoundTighteningType.h +++ b/src/engine/MILPSolverBoundTighteningType.h @@ -37,18 +37,11 @@ enum class MILPSolverBoundTighteningType { // Perform backward analysis using the PreimageApproximation Algorithm (arXiv:2305.03686v4 // [cs.SE]) BACKWARD_ANALYSIS_PREIMAGE_APPROX = 7, - // Perform backward analysis using INVPROP (arXiv:2302.01404v4 [cs.LG]) - BACKWARD_ANALYSIS_INVPROP = 8, - // Perform backward analysis using PMNR with random neuron selection. - BACKWARD_ANALYSIS_PMNR_RANDOM = 9, - // Perform backward analysis using PMNR with maximum gradient neuron selection (arXiv:1804.10829 - // [cs.AI]). - BACKWARD_ANALYSIS_PMNR_GRADIENT = 10, - // Perform backward analysis using PMNR with BBPS-based neuron selection (arXiv:2405.21063v3 - // [cs.LG]). - BACKWARD_ANALYSIS_PMNR_BBPS = 11, + // Perform backward analysis using PMNR with INVPROP and BBPS-based neuron selection + // (arXiv:2302.01404v4 [cs.LG], arXiv:2405.21063v3 [cs.LG]). + BACKWARD_ANALYSIS_PMNR = 8, // Option to have no MILP bound tightening performed - NONE = 20, + NONE = 9, }; #endif // __MILPSolverBoundTighteningType_h__ diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index cb43168c0c..c0a3dc6abd 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -79,13 +79,7 @@ void Layer::allocateMemory() Options::get()->getMILPSolverBoundTighteningType() == MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX || Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP || - Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM || - Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT || - Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS ) + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR ) { _symbolicLb = new double[_size * _inputLayerSize]; _symbolicUb = new double[_size * _inputLayerSize]; diff --git a/src/nlr/NetworkLevelReasoner.cpp b/src/nlr/NetworkLevelReasoner.cpp index d5bfbe6c73..17a6417162 100644 --- a/src/nlr/NetworkLevelReasoner.cpp +++ b/src/nlr/NetworkLevelReasoner.cpp @@ -403,19 +403,13 @@ void NetworkLevelReasoner::lpRelaxationPropagation() _layerIndexToLayer, true, layerIndicesToParameters ); } else if ( Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP || - Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM || - Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT || - Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS ) + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR ) { + const Vector optimalCoeffs = OptimalParameterisedSymbolicBoundTightening(); + Map> layerIndicesToParameters = + getParametersForLayers( optimalCoeffs ); const Vector &polygonalTightenings = OptimizeParameterisedPolygonalTightening(); - unsigned parameterCount = getNumberOfParameters(); - const 
Vector &coeffs = Vector( parameterCount, 0 ); - Map> layerIndicesToParameters = getParametersForLayers( coeffs ); lpFormulator.optimizeBoundsWithLpRelaxation( _layerIndexToLayer, false, layerIndicesToParameters, polygonalTightenings ); lpFormulator.optimizeBoundsWithLpRelaxation( @@ -1354,57 +1348,6 @@ double NetworkLevelReasoner::calculateDifferenceFromSymbolic( const Layer *layer return std::max( layer->getUb( i ) - upperSum, lowerSum - layer->getLb( i ) ); } -const Vector NetworkLevelReasoner::generatePolygonalTightenings() -{ - if ( Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP ) - { - return generatePolygonalTighteningsForInvprop(); - } - else if ( Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM || - Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT || - Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS ) - { - return generatePolygonalTighteningsForPMNR(); - } - - const Vector tighteningsVector = Vector( {} ); - return tighteningsVector; -} - -const Vector NetworkLevelReasoner::generatePolygonalTighteningsForInvprop() -{ - // Polygonal tightenings for INVPROP are box constraints for every non-input neuron. - Vector tightenings = Vector( {} ); - for ( const auto &pair : _layerIndexToLayer ) - { - Layer *layer = pair.second; - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - NeuronIndex index( pair.first, i ); - if ( layer->getLayerType() == Layer::WEIGHTED_SUM || layer->neuronNonfixed( i ) ) - { - Map lowerCoeffs; - Map upperCoeffs; - lowerCoeffs[index] = 1; - upperCoeffs[index] = 1; - PolygonalTightening lowerTightening( - lowerCoeffs, layer->getLb( i ), PolygonalTightening::LB ); - PolygonalTightening upperTightening( - upperCoeffs, layer->getUb( i ), PolygonalTightening::UB ); - tightenings.append( lowerTightening ); - tightenings.append( upperTightening ); - } - } - } - const Vector tighteningsVector = - Vector( tightenings ); - return tighteningsVector; -} - const Vector NetworkLevelReasoner::generatePolygonalTighteningsForPMNR() { Vector tightenings = Vector( {} ); @@ -1559,60 +1502,6 @@ const Vector NetworkLevelReasoner::generatePolygonalTighten } const Vector NetworkLevelReasoner::selectPMNRNeurons() -{ - switch ( Options::get()->getMILPSolverBoundTighteningType() ) - { - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM: - { - return selectPMNRNeuronsRandomly(); - break; - } - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT: - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS: - { - return selectPMNRNeuronsHeuristically(); - break; - } - default: - { - Vector emptyVector = Vector( {} ); - return emptyVector; - } - } -} - -const Vector NetworkLevelReasoner::selectPMNRNeuronsRandomly() -{ - // Randomly select layer with nonfixed neurons. - const Vector &candidateLayers = getLayersWithNonfixedNeurons(); - if ( candidateLayers.empty() ) - { - const Vector emptyVector( {} ); - return emptyVector; - } - std::mt19937_64 rng( GlobalConfiguration::PMNR_RANDOM_SEED ); - std::uniform_int_distribution dis( 0, candidateLayers.size() - 1 ); - unsigned entry = dis( rng ); - unsigned index = candidateLayers[entry]; - - // Randomly select nonfixed neurons from this layer. 
-    Layer *layer = _layerIndexToLayer[index];
-    const Vector<NeuronIndex> &candidateNeurons = layer->getNonfixedNeurons();
-    std::vector<NeuronIndex> candidateNeuronsVector = candidateNeurons.getContainer();
-    std::shuffle( candidateNeuronsVector.begin(), candidateNeuronsVector.end(), rng );
-
-    unsigned neuronCount =
-        std::min( GlobalConfiguration::PMNR_SELECTED_NEURONS, candidateNeurons.size() );
-    Vector<NeuronIndex> selectedNeurons = Vector<NeuronIndex>( neuronCount );
-    for ( unsigned i = 0; i < neuronCount; ++i )
-    {
-        selectedNeurons[i] = candidateNeuronsVector[i];
-    }
-    const Vector<NeuronIndex> neurons( selectedNeurons );
-    return neurons;
-}
-
-const Vector<NeuronIndex> NetworkLevelReasoner::selectPMNRNeuronsHeuristically()
 {
     // Select layer with maximal PMNR neuron score sum.
     const Vector<unsigned> &candidateLayers = getLayersWithNonfixedNeurons();
@@ -1673,7 +1562,7 @@ const Vector<PolygonalTightening> NetworkLevelReasoner::OptimizeParameterisedPol
     initializePMNRScoreMap();

     // Repeatedly optimize polygonal tightenings given previously optimized ones.
-    const Vector<PolygonalTightening> &selectedTightenings = generatePolygonalTightenings();
+    const Vector<PolygonalTightening> &selectedTightenings = generatePolygonalTighteningsForPMNR();
     Vector<PolygonalTightening> optimizedTightenings = Vector<PolygonalTightening>( {} );
     for ( unsigned i = 0; i < selectedTightenings.size(); ++i )
     {
@@ -1683,21 +1572,9 @@ const Vector<PolygonalTightening> NetworkLevelReasoner::OptimizeParameterisedPol
         double bound = OptimizeSingleParameterisedPolygonalTightening(
             tightening, optimizedTightenings, maximize, feasibiltyBound );

-        // For PMNR, attempt to obtain stronger bound by branching selected neurons if supported.
-        if ( Options::get()->getMILPSolverBoundTighteningType() ==
-                 MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM ||
-             Options::get()->getMILPSolverBoundTighteningType() ==
-                 MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT ||
-             Options::get()->getMILPSolverBoundTighteningType() ==
-                 MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS )
-        {
-            tightening._value = OptimizeSingleParameterisedPolygonalTighteningWithBranching(
-                tightening, optimizedTightenings, maximize, bound );
-        }
-        else
-        {
-            tightening._value = bound;
-        }
+        // Attempt to obtain stronger bound by branching selected neurons.
+        tightening._value = OptimizeSingleParameterisedPolygonalTighteningWithBranching(
+            tightening, optimizedTightenings, maximize, bound );
         optimizedTightenings.append( tightening );

         // Store optimized tightenings in NLR.
@@ -1799,41 +1676,26 @@ double NetworkLevelReasoner::OptimizeSingleParameterisedPolygonalTightening(
     double feasibilityBound,
     const Map &neuronToBranchIndex )
 {
-    // Search over coeffs in [0, 1]^numberOfParameters, gamma in
-    // [0, inf)^sizeOfPrevTightenings with PGD.
+    // Search over gamma in [0, inf)^sizeOfPrevTightenings with PGD.
     unsigned maxIterations = GlobalConfiguration::INVPROP_MAX_ITERATIONS;
-    double coeffsStepSize = GlobalConfiguration::INVPROP_STEP_SIZE;
     double gammaStepSize = GlobalConfiguration::INVPROP_STEP_SIZE;
     double epsilon = GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS;
     double weightDecay = GlobalConfiguration::INVPROP_WEIGHT_DECAY;
     double lr = GlobalConfiguration::INVPROP_LEARNING_RATE;
-    unsigned coeffsDimension = getNumberOfParameters();
     unsigned gammaDimension = prevTightenings.size();

     double sign = ( maximize ? 1 : -1 );
     double bestBound = tightening._value;

-    Vector<double> coeffsLowerBounds( coeffsDimension, 0 );
-    Vector<double> coeffsUpperBounds( coeffsDimension, 1 );
     Vector<double> gammaLowerBounds( gammaDimension, 0 );
-    Vector<double> coeffs( coeffsDimension, GlobalConfiguration::INVPROP_INITIAL_ALPHA );
     Vector<double> gamma( gammaDimension, GlobalConfiguration::INVPROP_INITIAL_GAMMA );
-    Vector<double> previousCoeffs( coeffs );
     Vector<double> previousGamma( gamma );
-    Vector<Vector<double>> coeffsCandidates( coeffsDimension );
     Vector<Vector<double>> gammaCandidates( gammaDimension );
-    Vector<double> coeffsGradient( coeffsDimension );
     Vector<double> gammaGradient( gammaDimension );

     for ( unsigned i = 0; i < maxIterations; ++i )
     {
-        for ( unsigned j = 0; j < coeffsDimension; ++j )
-        {
-            coeffs[j] += weightDecay * ( coeffs[j] - previousCoeffs[j] );
-            coeffs[j] = std::min( coeffs[j], coeffsUpperBounds[j] );
-            coeffs[j] = std::max( coeffs[j], coeffsLowerBounds[j] );
-        }
         for ( unsigned j = 0; j < gammaDimension; ++j )
         {
             gamma[j] += weightDecay * ( gamma[j] - previousGamma[j] );
@@ -1841,7 +1703,7 @@ double NetworkLevelReasoner::OptimizeSingleParameterisedPolygonalTightening(
         }

         double currentCost = getParameterisdPolygonalTighteningBound(
-            coeffs, gamma, tightening, prevTightenings, neuronToBranchIndex );
+            gamma, tightening, prevTightenings, neuronToBranchIndex );

         // If calculated bound is stronger than known feasibility bound, stop optimization.
         if ( !FloatUtils::isFinite( currentCost ) || maximize ? currentCost > feasibilityBound
                                                               : currentCost < feasibilityBound )
         {
             return currentCost;
         }

-        for ( unsigned j = 0; j < coeffsDimension; ++j )
-        {
-            coeffsCandidates[j] = Vector<double>( coeffs );
-            coeffsCandidates[j][j] += coeffsStepSize;
-            if ( coeffsCandidates[j][j] > coeffsUpperBounds[j] ||
-                 coeffsCandidates[j][j] < coeffsLowerBounds[j] )
-            {
-                coeffsGradient[j] = 0;
-                continue;
-            }
-
-            double cost = getParameterisdPolygonalTighteningBound(
-                coeffsCandidates[j], gamma, tightening, prevTightenings, neuronToBranchIndex );
-            if ( !FloatUtils::isFinite( cost ) || maximize ? cost > feasibilityBound
-                                                           : cost < feasibilityBound )
-            {
-                return cost;
-            }
-
-            coeffsGradient[j] = ( cost - currentCost ) / coeffsStepSize;
-            bestBound = ( maximize ? std::max( bestBound, cost ) : std::min( bestBound, cost ) );
-        }
-
         for ( unsigned j = 0; j < gammaDimension; ++j )
         {
             gammaCandidates[j] = Vector<double>( gamma );
@@ -1884,7 +1723,7 @@ double NetworkLevelReasoner::OptimizeSingleParameterisedPolygonalTightening(
             }

             double cost = getParameterisdPolygonalTighteningBound(
-                coeffs, gammaCandidates[j], tightening, prevTightenings, neuronToBranchIndex );
+                gammaCandidates[j], tightening, prevTightenings, neuronToBranchIndex );
             if ( !FloatUtils::isFinite( cost ) || maximize ? cost > feasibilityBound
                                                            : cost < feasibilityBound )
             {
@@ -1896,13 +1735,6 @@ double NetworkLevelReasoner::OptimizeSingleParameterisedPolygonalTightening(
         }

         bool gradientIsZero = true;
-        for ( unsigned j = 0; j < coeffsDimension; ++j )
-        {
-            if ( FloatUtils::abs( coeffsGradient[j] ) > epsilon )
-            {
-                gradientIsZero = false;
-            }
-        }
         for ( unsigned j = 0; j < gammaDimension; ++j )
         {
             if ( FloatUtils::abs( gammaGradient[j] ) > epsilon )
@@ -1914,13 +1746,6 @@ double NetworkLevelReasoner::OptimizeSingleParameterisedPolygonalTightening(
         {
             break;
         }
-        for ( unsigned j = 0; j < coeffsDimension; ++j )
-        {
-            previousCoeffs[j] = coeffs[j];
-            coeffs[j] += sign * lr * coeffsGradient[j];
-            coeffs[j] = std::min( coeffs[j], coeffsUpperBounds[j] );
-            coeffs[j] = std::max( coeffs[j], coeffsLowerBounds[j] );
-        }
         for ( unsigned j = 0; j < gammaDimension; ++j )
         {
             previousGamma[j] = gamma[j];
@@ -1933,15 +1758,11 @@ double NetworkLevelReasoner::OptimizeSingleParameterisedPolygonalTightening(
 }

 double NetworkLevelReasoner::getParameterisdPolygonalTighteningBound(
-    const Vector<double> &coeffs,
     const Vector<double> &gamma,
     PolygonalTightening &tightening,
     Vector<PolygonalTightening> &prevTightenings,
     const Map &neuronToBranchIndex )
 {
-    // First, run parameterised DeepPoly.
-    parameterisedDeepPoly( true, coeffs );
-
     // Recursively compute vectors mu, muHat for every layer with the backpropagation procedure.
     unsigned numLayers = _layerIndexToLayer.size();
     unsigned maxLayer = _layerIndexToLayer.size() - 1;
@@ -2143,47 +1964,11 @@ void NetworkLevelReasoner::initializePMNRScoreMap()
     {
         for ( const auto &index : pair.second->getNonfixedNeurons() )
         {
-            _neuronToPMNRScores.insert( index, calculateNeuronPMNRScore( index ) );
+            _neuronToPMNRScores.insert( index, calculatePMNRBBPSScore( index ) );
         }
     }
 }

-double NetworkLevelReasoner::calculateNeuronPMNRScore( NeuronIndex index )
-{
-    ASSERT( _layerIndexToLayer[index._layer]->neuronNonfixed( index._neuron ) );
-
-    if ( Options::get()->getMILPSolverBoundTighteningType() ==
-         MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT )
-    {
-        return calculatePMNRGradientScore( index );
-    }
-    else if ( Options::get()->getMILPSolverBoundTighteningType() ==
-              MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS )
-    {
-        return calculatePMNRBBPSScore( index );
-    }
-    return 0;
-}
-
-double NetworkLevelReasoner::calculatePMNRGradientScore( NeuronIndex index )
-{
-    double score = 0;
-    unsigned neuron = index._neuron;
-    unsigned layerIndex = index._layer;
-    unsigned outputLayerSize = _layerIndexToLayer[getNumberOfLayers() - 1]->getSize();
-
-    // Given upper and lower symbolic bounds for a neuron, ita gradient vector is estimated as
-    // the average of its symbolic weights, and its Gradient heuristic is the gradient squared.
-    for ( unsigned i = 0; i < outputLayerSize; ++i )
-    {
-        score += std::pow( ( getOutputSymbolicLb( layerIndex )[neuron * outputLayerSize + i] +
-                             getOutputSymbolicUb( layerIndex )[neuron * outputLayerSize + i] ) /
-                               2.0,
-                           2 );
-    }
-    return score;
-}
-
 double NetworkLevelReasoner::calculatePMNRBBPSScore( NeuronIndex index )
 {
     // Initialize BBPS branching points and branch symbolic bound maps.
@@ -2230,88 +2015,71 @@ double NetworkLevelReasoner::calculatePMNRBBPSScore( NeuronIndex index )
     // neuron, and branch symbolic bounds of this neuron in terms of one source neuron.
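    // Aside: a minimal sketch of the projected-gradient-descent search over gamma
    // retained above, in isolation. The cost callback, function name and defaults
    // are illustrative assumptions (the step size, learning rate and iteration cap
    // echo the INVPROP_* constants); momentum, weight decay and the zero-gradient
    // early exit of the real implementation are omitted for brevity.
    #include <algorithm>
    #include <functional>
    #include <vector>

    double pgdOverGamma( const std::function<double( const std::vector<double> & )> &cost,
                         unsigned dimension,
                         bool maximize,
                         unsigned maxIterations = 10,
                         double stepSize = 0.025,
                         double lr = 0.005 )
    {
        double sign = maximize ? 1 : -1;
        std::vector<double> gamma( dimension, 0.025 ); // initial gamma
        double best = cost( gamma );
        for ( unsigned i = 0; i < maxIterations; ++i )
        {
            double current = cost( gamma );
            best = maximize ? std::max( best, current ) : std::min( best, current );
            std::vector<double> gradient( dimension, 0 );
            for ( unsigned j = 0; j < dimension; ++j )
            {
                // Finite-difference estimate of the partial derivative in gamma[j].
                std::vector<double> candidate = gamma;
                candidate[j] += stepSize;
                gradient[j] = ( cost( candidate ) - current ) / stepSize;
            }
            for ( unsigned j = 0; j < dimension; ++j )
            {
                // Gradient step, projected back onto the feasible set gamma[j] >= 0.
                gamma[j] += sign * lr * gradient[j];
                gamma[j] = std::max( gamma[j], 0.0 );
            }
        }
        return best;
    }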
     std::pair<NeuronIndex, double> point = getBBPSBranchingPoint( index );
     NeuronIndex sourceIndex = point.first;
-    double value = point.second;
-    const Layer *sourceLayer = _layerIndexToLayer[sourceIndex._layer];
+    const Layer *sourceLayer = getLayer( sourceIndex._layer );
     double sourceLb = sourceLayer->getLb( sourceIndex._neuron );
     double sourceUb = sourceLayer->getUb( sourceIndex._neuron );
-    const Vector<double> values = Vector<double>( { sourceLb, value, sourceUb } );

     Vector<double> symbolicLbPerBranch = getSymbolicLbPerBranch( index );
     Vector<double> symbolicUbPerBranch = getSymbolicUbPerBranch( index );
     Vector<double> symbolicLowerBiasPerBranch = getSymbolicLowerBiasPerBranch( index );
     Vector<double> symbolicUpperBiasPerBranch = getSymbolicUpperBiasPerBranch( index );
-    unsigned branchCount = values.size() - 1;
-    ASSERT( symbolicLbPerBranch.size() == branchCount );
+    unsigned branchCount = symbolicLbPerBranch.size();
     ASSERT( symbolicUbPerBranch.size() == branchCount );
     ASSERT( symbolicLowerBiasPerBranch.size() == branchCount );
     ASSERT( symbolicUpperBiasPerBranch.size() == branchCount );

-    Vector<double> scores( branchCount, 0 );
-    for ( unsigned i = 0; i < branchCount; ++i )
+    // For every output neuron, substitute branch symbolic bounds in the output symbolic bounds,
+    // sum over all branches and output bounds.
+    double sourceSymbolicLb = 0;
+    double sourceSymbolicUb = 0;
+    double sourceSymbolicLowerBias = 0;
+    double sourceSymbolicUpperBias = 0;
+    for ( unsigned i = 0; i < outputLayerSize; ++i )
     {
-        // Substitute the branch symbolic bounds in the output symbolic bounds.
-        Vector<double> sourceSymbolicLb( outputLayerSize, 0 );
-        Vector<double> sourceSymbolicUb( outputLayerSize, 0 );
-        Vector<double> sourceSymbolicLowerBias( outputLayerSize, 0 );
-        Vector<double> sourceSymbolicUpperBias( outputLayerSize, 0 );
-
-        for ( unsigned j = 0; j < outputLayerSize; ++j )
+        for ( unsigned j = 0; j < branchCount; ++j )
         {
-            sourceSymbolicLowerBias[j] += concretizedOutputSymbolicLowerBias[j];
-            sourceSymbolicUpperBias[j] += concretizedOutputSymbolicUpperBias[j];
+            sourceSymbolicLowerBias += concretizedOutputSymbolicLowerBias[i];
+            sourceSymbolicUpperBias += concretizedOutputSymbolicUpperBias[i];

-            if ( concretizedOutputSymbolicLb[j] > 0 )
+            if ( concretizedOutputSymbolicLb[i] > 0 )
             {
-                sourceSymbolicLb[j] += concretizedOutputSymbolicLb[j] * symbolicLbPerBranch[i];
-                sourceSymbolicLowerBias[j] +=
-                    concretizedOutputSymbolicLb[j] * symbolicLowerBiasPerBranch[i];
+                sourceSymbolicLb += concretizedOutputSymbolicLb[i] * symbolicLbPerBranch[j];
+                sourceSymbolicLowerBias +=
+                    concretizedOutputSymbolicLb[i] * symbolicLowerBiasPerBranch[j];
             }
             else
             {
-                sourceSymbolicLb[j] += concretizedOutputSymbolicLb[j] * symbolicUbPerBranch[i];
-                sourceSymbolicLowerBias[j] +=
-                    concretizedOutputSymbolicLb[j] * symbolicUpperBiasPerBranch[i];
+                sourceSymbolicLb += concretizedOutputSymbolicLb[i] * symbolicUbPerBranch[j];
+                sourceSymbolicLowerBias +=
+                    concretizedOutputSymbolicLb[i] * symbolicUpperBiasPerBranch[j];
             }

-            if ( concretizedOutputSymbolicUb[j] > 0 )
+            if ( concretizedOutputSymbolicUb[i] > 0 )
             {
-                sourceSymbolicUb[j] += concretizedOutputSymbolicUb[j] * symbolicUbPerBranch[i];
-                sourceSymbolicUpperBias[j] +=
-                    concretizedOutputSymbolicUb[j] * symbolicUpperBiasPerBranch[i];
+                sourceSymbolicUb += concretizedOutputSymbolicUb[i] * symbolicUbPerBranch[j];
+                sourceSymbolicUpperBias +=
+                    concretizedOutputSymbolicUb[i] * symbolicUpperBiasPerBranch[j];
             }
             else
             {
-                sourceSymbolicUb[j] += concretizedOutputSymbolicUb[j] * symbolicLbPerBranch[i];
-                sourceSymbolicUpperBias[j] +=
-                    concretizedOutputSymbolicUb[j] * symbolicLowerBiasPerBranch[i];
+                sourceSymbolicUb += concretizedOutputSymbolicUb[i] * symbolicLbPerBranch[j];
+                sourceSymbolicUpperBias +=
+                    concretizedOutputSymbolicUb[i] * symbolicLowerBiasPerBranch[j];
             }
-
-            // concretize the source neuron to get concrete bounds for every output neuron.
-            double concreteLb =
-                sourceSymbolicLb[j] > 0
-                    ? sourceSymbolicLb[j] * values[i] + sourceSymbolicLowerBias[j]
-                    : sourceSymbolicLb[j] * values[i + 1] + sourceSymbolicLowerBias[j];
-            double concreteUb = sourceSymbolicUb[j] > 0
-                                    ? sourceSymbolicUb[j] * values[i + 1] + sourceSymbolicUpperBias[j]
-                                    : sourceSymbolicUb[j] * values[i] + sourceSymbolicUpperBias[j];
-
-            double outputLb = outputLayer->getLb( j );
-            double outputUb = outputLayer->getUb( j );
-
-            // The branch's score is the improvement of concrete bounds over known DeepPoly bounds.
-            scores[i] +=
-                std::max( outputUb - concreteUb, 0.0 ) + std::max( concreteLb - outputLb, 0.0 );
         }
     }

-    // The neuron's final PMNR-BBPS score is the average of its branch scores.
-    double score = 0;
-    for ( unsigned i = 0; i < branchCount; ++i )
-    {
-        score += scores[i];
-    }
-    return score / branchCount;
+    // Concretize the source neuron to get upper and lower bounds for the symbolic expression.
+    // The neuron's final score is the range of the concrete bounds divided by the branch count.
+    double scoreLower = sourceSymbolicLb > 0
+                            ? sourceSymbolicLb * sourceLb + sourceSymbolicLowerBias
+                            : sourceSymbolicLb * sourceUb + sourceSymbolicLowerBias;
+    double scoreUpper = sourceSymbolicUb > 0
+                            ? sourceSymbolicUb * sourceUb + sourceSymbolicUpperBias
+                            : sourceSymbolicUb * sourceLb + sourceSymbolicUpperBias;
+
+    return ( scoreUpper - scoreLower ) / branchCount;
 }

 void NetworkLevelReasoner::initializeBBPSBranchingMaps()
@@ -3584,19 +3352,9 @@ unsigned NetworkLevelReasoner::getNumberOfParametersPerType( Layer::Type t ) con

 bool NetworkLevelReasoner::supportsInvpropBranching( Layer::Type type ) const
 {
-    // Without BBPS heuristic, only branch Relu/Sign/Abs/Leaky Relu activation before INVPROP.
-    if ( Options::get()->getMILPSolverBoundTighteningType() ==
-             MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_RANDOM ||
-         Options::get()->getMILPSolverBoundTighteningType() ==
-             MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_GRADIENT )
-    {
-        return type == Layer::RELU || type == Layer::LEAKY_RELU || type == Layer::SIGN ||
-               type == Layer::ABSOLUTE_VALUE;
-    }
-    // When using BBPS heuristic, all implemented activations could be branched before INVPROP.
-    else if ( Options::get()->getMILPSolverBoundTighteningType() ==
-              MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR_BBPS )
+    if ( Options::get()->getMILPSolverBoundTighteningType() ==
+         MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR )
     {
         return type == Layer::RELU || type == Layer::LEAKY_RELU || type == Layer::SIGN ||
                type == Layer::ABSOLUTE_VALUE || type == Layer::MAX || type == Layer::ROUND ||

diff --git a/src/nlr/NetworkLevelReasoner.h b/src/nlr/NetworkLevelReasoner.h
index 07b01c6a38..fd285551fd 100644
--- a/src/nlr/NetworkLevelReasoner.h
+++ b/src/nlr/NetworkLevelReasoner.h
@@ -355,15 +355,11 @@ class NetworkLevelReasoner : public LayerOwner
                               Map &point,
                               unsigned i ) const;

-    // Heuristically generating optimizable polygonal tightening for INVPROP or PMNR.
-    const Vector<PolygonalTightening> generatePolygonalTightenings();
+    // Heuristically generating optimizable polygonal tightening for PMNR.
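    // Aside: the reworked BBPS score above boils down to concretizing a single
    // symbolic expression over the source neuron's range. A minimal sketch; the
    // helper name and flattened parameters are illustrative assumptions:
    double bbpsScoreSketch( double symLb, double lowerBias,   // accumulated lower bound
                            double symUb, double upperBias,   // accumulated upper bound
                            double sourceLb, double sourceUb, // source neuron's range
                            unsigned branchCount )
    {
        // A positive lower coefficient is minimized at the source's lower bound.
        double scoreLower = symLb > 0 ? symLb * sourceLb + lowerBias
                                      : symLb * sourceUb + lowerBias;
        // A positive upper coefficient is maximized at the source's upper bound.
        double scoreUpper = symUb > 0 ? symUb * sourceUb + upperBias
                                      : symUb * sourceLb + upperBias;
        // Score: range of the concrete bounds, normalized by the branch count.
        return ( scoreUpper - scoreLower ) / branchCount;
    }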
     const Vector<PolygonalTightening> generatePolygonalTighteningsForPMNR();
-    const Vector<PolygonalTightening> generatePolygonalTighteningsForInvprop();

     // Heuristically select neurons for PMNR.
     const Vector<NeuronIndex> selectPMNRNeurons();
-    const Vector<NeuronIndex> selectPMNRNeuronsRandomly();
-    const Vector<NeuronIndex> selectPMNRNeuronsHeuristically();

     // Optimize biases of generated parameterised polygonal tightenings.
     double OptimizeSingleParameterisedPolygonalTightening(
@@ -381,7 +377,6 @@ class NetworkLevelReasoner : public LayerOwner

     // Get current lower bound for selected parameterised polygonal tightenings' biases.
     double getParameterisdPolygonalTighteningBound(
-        const Vector<double> &coeffs,
         const Vector<double> &gamma,
         PolygonalTightening &tightening,
         Vector<PolygonalTightening> &prevTightenings,
@@ -396,8 +391,6 @@ class NetworkLevelReasoner : public LayerOwner

     // Calculate PMNRScore for every non-fixed neurons.
     void initializePMNRScoreMap();
-    double calculateNeuronPMNRScore( NeuronIndex index );
-    double calculatePMNRGradientScore( NeuronIndex index );
     double calculatePMNRBBPSScore( NeuronIndex index );

     // Initialize PMNR-BBPS branching point scores and per-branch predecessor symbolic bounds for

diff --git a/src/nlr/tests/Test_LPRelaxation.h b/src/nlr/tests/Test_LPRelaxation.h
index 6aafe080ec..242916a81e 100644
--- a/src/nlr/tests/Test_LPRelaxation.h
+++ b/src/nlr/tests/Test_LPRelaxation.h
@@ -4462,7 +4462,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         } );

         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( bounds, newBounds );
+        TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );

         // Change the current bounds
@@ -4530,7 +4530,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         } );

         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( bounds, newBounds );
+        TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
     }
@@ -4614,7 +4614,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         } );

         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( bounds, newBounds );
+        TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );

         // Change the current bounds
@@ -4730,7 +4730,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         } );

         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( bounds, newBounds );
+        TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
     }
@@ -4795,7 +4795,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         } );

         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( bounds, newBounds );
+        TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );

         // Change the current bounds
@@ -4876,7 +4876,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         } );

         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( bounds, newBounds );
+        TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
     }
@@ -4961,7 +4961,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         } );

         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( bounds, newBounds );
+        TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );

         // Change the current bounds
@@ -5074,7 +5074,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         } );

         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( bounds, newBounds );
+        TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
     }
@@ -5127,7 +5127,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         List expectedBounds2( {} );

         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( bounds, newBounds );
+        TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );

         // Change the current bounds
@@ -5190,7 +5190,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         List expectedBounds4( {} );

         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( bounds, newBounds );
+        TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
     }
@@ -5256,7 +5256,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         List expectedBounds2( {} );

         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( bounds, newBounds );
+        TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );

         // Change the current bounds
@@ -5350,7 +5350,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         List expectedBounds4( {} );

         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( bounds, newBounds );
+        TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
     }
@@ -5406,7 +5406,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         } );

         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( bounds, newBounds );
+        TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );

         // Change the current bounds
@@ -5480,7 +5480,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         } );

         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( bounds, newBounds );
+        TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
     }
@@ -5546,7 +5546,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         } );

         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( bounds, newBounds );
+        TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );

         // Change the current bounds
@@ -5631,7 +5631,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         } );

         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( bounds, newBounds );
+        TS_ASSERT_THROWS_NOTHING(
bounds = removeRedundancies( newBounds, bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
     }
@@ -5656,38 +5656,45 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         return allFound;
     }

-    // Create list of all tightenings in newBounds for which there is no bound in newBounds or in
-    // bounds which is at least as tight.
-    List removeRedundancies( const List &bounds,
-                             const List &newBounds )
+    // Create list of all tightenings in newBounds for which there is no later bound in newBounds,
+    // and no bound in bounds, which is at least as tight.
+    List removeRedundancies( const List &newBounds,
+                             const List &bounds )
     {
         List minimalBounds;
-
-        for ( const auto &newBound : newBounds )
+        unsigned i = 0;
+        for ( const auto &bound : newBounds )
         {
             bool foundTighter = false;
-            for ( const auto &bound : bounds )
+            unsigned j = 0;
+            for ( const auto &otherBound : newBounds )
             {
-                foundTighter |=
-                    ( newBound._type == bound._type && newBound._variable == bound._variable &&
-                      ( ( newBound._type == Tightening::LB &&
-                          FloatUtils::lte( newBound._value, bound._value, 0.0001 ) ) ||
-                        ( newBound._type == Tightening::UB &&
-                          FloatUtils::gte( newBound._value, bound._value, 0.0001 ) ) ) );
+                if ( i < j )
+                {
+                    foundTighter |=
+                        ( bound._type == otherBound._type &&
+                          bound._variable == otherBound._variable &&
+                          ( ( bound._type == Tightening::LB &&
+                              FloatUtils::lte( bound._value, otherBound._value, 0.0001 ) ) ||
+                            ( bound._type == Tightening::UB &&
+                              FloatUtils::gte( bound._value, otherBound._value, 0.0001 ) ) ) );
+                }
+                ++j;
             }
-
-            for ( const auto &bound : newBounds )
+            for ( const auto &otherBound : bounds )
             {
                 foundTighter |=
-                    ( newBound._type == bound._type && newBound._variable == bound._variable &&
-                      ( ( newBound._type == Tightening::LB &&
-                          FloatUtils::lt( newBound._value, bound._value, 0.0001 ) ) ||
-                        ( newBound._type == Tightening::UB &&
-                          FloatUtils::gt( newBound._value, bound._value, 0.0001 ) ) ) );
+                    ( bound._type == otherBound._type && bound._variable == otherBound._variable &&
+                      ( ( bound._type == Tightening::LB &&
+                          FloatUtils::lte( bound._value, otherBound._value, 0.0001 ) ) ||
+                        ( bound._type == Tightening::UB &&
+                          FloatUtils::gte( bound._value, otherBound._value, 0.0001 ) ) ) );
             }
             if ( !foundTighter )
-                minimalBounds.append( newBound );
+            {
+                minimalBounds.append( bound );
+            }
+            ++i;
         }
         return minimalBounds;
     }

diff --git a/src/nlr/tests/Test_PMNR.h b/src/nlr/tests/Test_PMNR.h
index 5ca1bbb181..428ecdf515 100644
--- a/src/nlr/tests/Test_PMNR.h
+++ b/src/nlr/tests/Test_PMNR.h
@@ -548,1053 +548,11 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         tableau.setUpperBound( 11, large );
     }

-    void populateNetworkMinimalReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau )
-    {
-        /*
-              1    R     1    R   -1    1
-          x0 --- x2 ---> x4 --- x6 ---> x8 --- x10
-            \   /          \   /          /
-           1 \ /          0 \ /          /
-              \/             \/         /
-              /\             /\        /
-           1 /  \          2 /  \  -1 /
-            /    \  R       /    \  R
-          x1 --- x3 ---> x5 --- x7 ---> x9
-              -1             1     1.5
-
-          The example described in Fig.
2 of - https://proceedings.neurips.cc/paper_files/paper/2019/file/0a9fdbb17feb6ccb7ec405cfb85222c4-Paper.pdf - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::RELU, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, 0 ); - nlr.setWeight( 2, 1, 3, 0, 2 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - - nlr.setBias( 3, 1, 1.5 ); - nlr.setBias( 5, 0, 1 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 11 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - } - - void test_backward_abs_and_relu() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkWithAbsAndRelu( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - - Tightening( 5, 0, 
Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, -5, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, -5, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), - - Tightening( 12, -1, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, -14, Tightening::LB ), Tightening( 13, 26.25, Tightening::UB ), - } ); - - List bounds, newBounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 10, 0, Tightening::LB ), - Tightening( 11, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); - bounds = removeRedundancies( newBounds ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - List> infeasibleBranches( {} ); - List> expectedInfeasibleBranches( {} ); - TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) ); - TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) ); - } - - void test_backward_round_and_sign() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkWithRoundAndSign( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - Tightening( 6, -5, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -6, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), - Tightening( 9, -5.5, Tightening::LB ), Tightening( 9, 7.5, Tightening::UB ), - - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), - - Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), - } ); - - List bounds, newBounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List 
expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); - bounds = removeRedundancies( newBounds ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - List> infeasibleBranches( {} ); - List> expectedInfeasibleBranches( {} ); - TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) ); - TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) ); - } - - void test_backward_leaky_relu_and_sigmoid() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkWithLeakyReluAndSigmoid( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - Tightening( 6, -5, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -6, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), - Tightening( 9, -4, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), - - Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9997, Tightening::UB ), - Tightening( 11, 0.0180, Tightening::LB ), Tightening( 11, 0.9975, Tightening::UB ), - - Tightening( 12, 0.0025, Tightening::LB ), Tightening( 12, 0.9997, Tightening::UB ), - Tightening( 13, 0.0564, Tightening::LB ), Tightening( 13, 3.9922, Tightening::UB ), - } ); - - List bounds, newBounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 6, -0.5, Tightening::LB ), - Tightening( 7, -0.1, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); - bounds = removeRedundancies( newBounds ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - List> infeasibleBranches( {} ); - List> expectedInfeasibleBranches( {} ); - TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) ); - TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) ); - } - - void test_backward_softmax_and_max() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkWithSoftmaxAndMax( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( 
nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - - Tightening( 5, 0.0066, Tightening::LB ), Tightening( 5, 0.9517, Tightening::UB ), - Tightening( 6, 0.0007, Tightening::LB ), Tightening( 6, 0.9909, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ), - - Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ), - Tightening( 9, 0.3192, Tightening::LB ), Tightening( 9, 2.9819, Tightening::UB ), - - Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ), - - Tightening( 11, -2.9819, Tightening::LB ), Tightening( 11, -0.3192, Tightening::UB ), - } ); - - List bounds, newBounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); - bounds = removeRedundancies( newBounds ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - List> infeasibleBranches( {} ); - List> expectedInfeasibleBranches( {} ); - TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) ); - TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) ); - } - - void test_backward_relu_and_bilinear() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkWithReluAndBilinear( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - - Tightening( 10, -7, Tightening::LB ), Tightening( 10, 35, Tightening::UB ), - - Tightening( 11, -35, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), - } ); - - List bounds, newBounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( 
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( newBounds );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-        List<List<Tightening>> infeasibleBranches( {} );
-        List<List<Tightening>> expectedInfeasibleBranches( {} );
-        TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
-        TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
-    }
-
-    void test_pmnr_invprop_abs_and_relu()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkWithAbsAndRelu( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 0, Tightening::LB ),   Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -5, Tightening::LB ),  Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -1, Tightening::LB ),  Tightening( 4, 1, Tightening::UB ),
-
-            Tightening( 5, 0, Tightening::LB ),   Tightening( 5, 2, Tightening::UB ),
-            Tightening( 6, 0, Tightening::LB ),   Tightening( 6, 5, Tightening::UB ),
-            Tightening( 7, 0, Tightening::LB ),   Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, -1, Tightening::LB ),  Tightening( 8, 7, Tightening::UB ),
-            Tightening( 9, -5, Tightening::LB ),  Tightening( 9, 7, Tightening::UB ),
-
-            Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-            Tightening( 11, -5, Tightening::LB ), Tightening( 11, 7, Tightening::UB ),
-
-            Tightening( 12, -1, Tightening::LB ), Tightening( 12, 7, Tightening::UB ),
-            Tightening( 13, -14, Tightening::LB ), Tightening( 13, 26.25, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds, newBounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Invoke Invprop
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 10, 0, Tightening::LB ),
-            Tightening( 11, 0, Tightening::LB ),
-
-            Tightening( 12, 0, Tightening::LB ),
-            Tightening( 13, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( newBounds );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-        List<List<Tightening>> infeasibleBranches( {} );
-        List<List<Tightening>> expectedInfeasibleBranches( {} );
-        TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
-        TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
-    }
-
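For reference, the four new lower bounds that Invprop derives above all follow from the nonnegativity of the piecewise-linear outputs; a one-line sketch of the reasoning (not part of the patch, and assuming the output weighted sum here combines the ReLU outputs with nonnegative coefficients):

    y = ReLU(x) = max(0, x)  =>  y >= 0,

so the DeepPoly lower bounds -1, -5, -1 and -14 of x10, x11, x12 and x13 can all be lifted to 0.
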
-    void test_pmnr_invprop_round_and_sign()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkWithRoundAndSign( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 0, Tightening::LB ),   Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -5, Tightening::LB ),  Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -1, Tightening::LB ),  Tightening( 4, 1, Tightening::UB ),
-
-            Tightening( 5, 0, Tightening::LB ),   Tightening( 5, 2, Tightening::UB ),
-            Tightening( 6, -5, Tightening::LB ),  Tightening( 6, 5, Tightening::UB ),
-            Tightening( 7, -1, Tightening::LB ),  Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, -6, Tightening::LB ),  Tightening( 8, 8, Tightening::UB ),
-            Tightening( 9, -5.5, Tightening::LB ), Tightening( 9, 7.5, Tightening::UB ),
-
-            Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ),
-            Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ),
-
-            Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ),
-            Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds, newBounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Invoke Invprop
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 9, -4.75, Tightening::LB ),
-            Tightening( 9, 6.75, Tightening::UB ),
-
-            Tightening( 12, 1, Tightening::UB ),
-            Tightening( 13, 4, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( newBounds );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-        List<List<Tightening>> infeasibleBranches( {} );
-        List<List<Tightening>> expectedInfeasibleBranches( {} );
-        TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
-        TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
-    }
-
-    void test_pmnr_invprop_leaky_relu_and_sigmoid()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkWithLeakyReluAndSigmoid( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 0, Tightening::LB ),   Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -5, Tightening::LB ),  Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -1, Tightening::LB ),  Tightening( 4, 1, Tightening::UB ),
-
-            Tightening( 5, 0, Tightening::LB ),   Tightening( 5, 2, Tightening::UB ),
-            Tightening( 6, -5, Tightening::LB ),  Tightening( 6, 5, Tightening::UB ),
-            Tightening( 7, -1, Tightening::LB ),  Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, -6, Tightening::LB ),  Tightening( 8, 8, Tightening::UB ),
-            Tightening( 9, -4, Tightening::LB ),  Tightening( 9, 6, Tightening::UB ),
-
-            Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9997, Tightening::UB ),
-            Tightening( 11, 0.0180, Tightening::LB ), Tightening( 11, 0.9975, Tightening::UB ),
-
-            Tightening( 12, 0.0025, Tightening::LB ), Tightening( 12, 0.9997, Tightening::UB ),
-            Tightening( 13, 0.0564, Tightening::LB ), Tightening( 13, 3.9922, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds, newBounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Invoke Invprop
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 6, -0.5, Tightening::LB ),
-            Tightening( 7, -0.1, Tightening::LB ),
-            Tightening( 8, 7.1, Tightening::UB ),
-            Tightening( 8, -1.5, Tightening::LB ),
-            Tightening( 9, 5.1, Tightening::UB ),
-            Tightening( 9, -1.1, Tightening::LB ),
-            Tightening( 10, 0.0845, Tightening::LB ),
-            Tightening( 10, 0.9993, Tightening::UB ),
-            Tightening( 11, 0.2181, Tightening::LB ),
-            Tightening( 11, 0.9949, Tightening::UB ),
-            Tightening( 12, 0.0845, Tightening::LB ),
-            Tightening( 12, 0.9993, Tightening::UB ),
-            Tightening( 13, 0.7410, Tightening::LB ),
-            Tightening( 13, 3.9841, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( newBounds );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-        List<List<Tightening>> infeasibleBranches( {} );
-        List<List<Tightening>> expectedInfeasibleBranches( {} );
-        TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
-        TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
-    }
-
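The sigmoid endpoints in the expected DeepPoly bounds above (0.0025, 0.9997, and so on) are simply the sigmoid evaluated at the concrete input bounds: sigmoid is monotonically increasing, so for x in [l, u] the tightest concrete interval of sigmoid(x) is [sigmoid(l), sigmoid(u)]. A minimal standalone sketch of that computation (not part of the test suite; the four-decimal rounding is an assumption):

    #include <cmath>
    #include <cstdio>

    // Monotonicity of the sigmoid gives the concrete output interval
    // [sigmoid(l), sigmoid(u)] for an input interval [l, u].
    static double sigmoid( double x )
    {
        return 1.0 / ( 1.0 + std::exp( -x ) );
    }

    int main()
    {
        // x8 in [-6, 8], as in the expected bounds above.
        std::printf( "x10 in [%.4f, %.4f]\n", sigmoid( -6 ), sigmoid( 8 ) );
        // Prints approximately [0.0025, 0.9997].
        return 0;
    }
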
-    void test_pmnr_invprop_softmax_and_max()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkWithSoftmaxAndMax( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 0, Tightening::LB ),   Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -5, Tightening::LB ),  Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -1, Tightening::LB ),  Tightening( 4, 1, Tightening::UB ),
-
-            Tightening( 5, 0.0066, Tightening::LB ), Tightening( 5, 0.9517, Tightening::UB ),
-            Tightening( 6, 0.0007, Tightening::LB ), Tightening( 6, 0.9909, Tightening::UB ),
-            Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ),
-
-            Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ),
-            Tightening( 9, 0.3192, Tightening::LB ),  Tightening( 9, 2.9819, Tightening::UB ),
-
-            Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ),
-
-            Tightening( 11, -2.9819, Tightening::LB ), Tightening( 11, -0.3192, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds, newBounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Invoke Invprop
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 8, -0.6812, Tightening::LB ),
-            Tightening( 8, 1.8414, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( newBounds );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-        List<List<Tightening>> infeasibleBranches( {} );
-        List<List<Tightening>> expectedInfeasibleBranches( {} );
-        TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
-        TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
-    }
-
-    void test_pmnr_invprop_relu_and_bilinear()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-invprop" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkWithReluAndBilinear( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 0, Tightening::LB ),   Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -5, Tightening::LB ),  Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -1, Tightening::LB ),  Tightening( 4, 1, Tightening::UB ),
-
-            Tightening( 5, 0, Tightening::LB ),   Tightening( 5, 2, Tightening::UB ),
-            Tightening( 6, 0, Tightening::LB ),   Tightening( 6, 5, Tightening::UB ),
-            Tightening( 7, 0, Tightening::LB ),   Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, -1, Tightening::LB ),  Tightening( 8, 7, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ),  Tightening( 9, 5, Tightening::UB ),
-
-            Tightening( 10, -7, Tightening::LB ), Tightening( 10, 35, Tightening::UB ),
-
-            Tightening( 11, -35, Tightening::LB ), Tightening( 11, 7, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds, newBounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Invoke Invprop
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( newBounds );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-        List<List<Tightening>> infeasibleBranches( {} );
-        List<List<Tightening>> expectedInfeasibleBranches( {} );
-        TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
-        TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
-    }
-
-    void test_pmnr_random_abs_and_relu()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-random" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkWithAbsAndRelu( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 0, Tightening::LB ),   Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -5, Tightening::LB ),  Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -1, Tightening::LB ),  Tightening( 4, 1, Tightening::UB ),
-
-            Tightening( 5, 0, Tightening::LB ),   Tightening( 5, 2, Tightening::UB ),
-            Tightening( 6, 0, Tightening::LB ),   Tightening( 6, 5, Tightening::UB ),
-            Tightening( 7, 0, Tightening::LB ),   Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, -1, Tightening::LB ),  Tightening( 8, 7, Tightening::UB ),
-            Tightening( 9, -5, Tightening::LB ),  Tightening( 9, 7, Tightening::UB ),
-
-            Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
-            Tightening( 11, -5, Tightening::LB ), Tightening( 11, 7, Tightening::UB ),
-
-            Tightening( 12, -1, Tightening::LB ), Tightening( 12, 7, Tightening::UB ),
-            Tightening( 13, -14, Tightening::LB ), Tightening( 13, 26.25, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds, newBounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 10, 0, Tightening::LB ),
-            Tightening( 11, 0, Tightening::LB ),
-
-            Tightening( 12, 0, Tightening::LB ),
-            Tightening( 13, 0, Tightening::LB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( newBounds );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-        List<List<Tightening>> infeasibleBranches( {} );
-        List<List<Tightening>> expectedInfeasibleBranches( {} );
-        TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
-        TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
-    }
-
-    void test_pmnr_random_round_and_sign()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-random" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkWithRoundAndSign( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 0, Tightening::LB ),   Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -5, Tightening::LB ),  Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -1, Tightening::LB ),  Tightening( 4, 1, Tightening::UB ),
-
-            Tightening( 5, 0, Tightening::LB ),   Tightening( 5, 2, Tightening::UB ),
-            Tightening( 6, -5, Tightening::LB ),  Tightening( 6, 5, Tightening::UB ),
-            Tightening( 7, -1, Tightening::LB ),  Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, -6, Tightening::LB ),  Tightening( 8, 8, Tightening::UB ),
-            Tightening( 9, -5.5, Tightening::LB ), Tightening( 9, 7.5, Tightening::UB ),
-
-            Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ),
-            Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ),
-
-            Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ),
-            Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds, newBounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 9, -4.75, Tightening::LB ),
-            Tightening( 9, 6.75, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( newBounds );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-        List<List<Tightening>> infeasibleBranches( {} );
-        List<List<Tightening>> expectedInfeasibleBranches( {} );
-        TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
-        TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
-    }
-
-    void test_pmnr_random_leaky_relu_and_sigmoid()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-random" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkWithLeakyReluAndSigmoid( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 0, Tightening::LB ),   Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -5, Tightening::LB ),  Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -1, Tightening::LB ),  Tightening( 4, 1, Tightening::UB ),
-
-            Tightening( 5, 0, Tightening::LB ),   Tightening( 5, 2, Tightening::UB ),
-            Tightening( 6, -5, Tightening::LB ),  Tightening( 6, 5, Tightening::UB ),
-            Tightening( 7, -1, Tightening::LB ),  Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, -6, Tightening::LB ),  Tightening( 8, 8, Tightening::UB ),
-            Tightening( 9, -4, Tightening::LB ),  Tightening( 9, 6, Tightening::UB ),
-
-            Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9997, Tightening::UB ),
-            Tightening( 11, 0.0180, Tightening::LB ), Tightening( 11, 0.9975, Tightening::UB ),
-
-            Tightening( 12, 0.0025, Tightening::LB ), Tightening( 12, 0.9997, Tightening::UB ),
-            Tightening( 13, 0.0564, Tightening::LB ), Tightening( 13, 3.9922, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds, newBounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 6, -0.5, Tightening::LB ),
-            Tightening( 7, -0.1, Tightening::LB ),
-            Tightening( 8, -1.5, Tightening::LB ),
-            Tightening( 8, 7.1, Tightening::UB ),
-            Tightening( 9, -1.1, Tightening::LB ),
-            Tightening( 9, 5.1, Tightening::UB ),
-            Tightening( 10, 0.0266, Tightening::LB ),
-            Tightening( 10, 0.9995, Tightening::UB ),
-            Tightening( 11, 0.1679, Tightening::LB ),
-            Tightening( 11, 0.9960, Tightening::UB ),
-            Tightening( 12, 0.0266, Tightening::LB ),
-            Tightening( 12, 0.9995, Tightening::UB ),
-            Tightening( 13, 0.5302, Tightening::LB ),
-            Tightening( 13, 3.9875, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( newBounds );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-        List<List<Tightening>> infeasibleBranches( {} );
-        List<List<Tightening>> expectedInfeasibleBranches( {} );
-        TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
-        TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
-    }
-
-    void test_pmnr_random_softmax_and_max()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-random" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkWithSoftmaxAndMax( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 0, Tightening::LB ),   Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -5, Tightening::LB ),  Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -1, Tightening::LB ),  Tightening( 4, 1, Tightening::UB ),
-
-            Tightening( 5, 0.0066, Tightening::LB ), Tightening( 5, 0.9517, Tightening::UB ),
-            Tightening( 6, 0.0007, Tightening::LB ), Tightening( 6, 0.9909, Tightening::UB ),
-            Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ),
-
-            Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ),
-            Tightening( 9, 0.3192, Tightening::LB ),  Tightening( 9, 2.9819, Tightening::UB ),
-
-            Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ),
-
-            Tightening( 11, -2.9819, Tightening::LB ), Tightening( 11, -0.3192, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds, newBounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {
-            Tightening( 8, -0.6812, Tightening::LB ),
-            Tightening( 8, 1.8409, Tightening::UB ),
-        } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( newBounds );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-        List<List<Tightening>> infeasibleBranches( {} );
-        List<List<Tightening>> expectedInfeasibleBranches( {} );
-        TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
-        TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
-    }
-
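For reference, the max-layer expectations in the softmax tests above follow from plain interval propagation through the maximum; a sketch of the reasoning (not part of the patch): for x10 = max(x8, x9) with x8 in [-0.7225, 1.9403] and x9 in [0.3192, 2.9819],

    max(x8, x9) in [max(l8, l9), max(u8, u9)] = [0.3192, 2.9819],

and x11 = -x10 flips that interval to [-2.9819, -0.3192], matching the expected bounds.
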
-    void test_pmnr_random_relu_and_bilinear()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-random" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkWithReluAndBilinear( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-
-        // Invoke DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() );
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, 0, Tightening::LB ),   Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -5, Tightening::LB ),  Tightening( 3, 5, Tightening::UB ),
-            Tightening( 4, -1, Tightening::LB ),  Tightening( 4, 1, Tightening::UB ),
-
-            Tightening( 5, 0, Tightening::LB ),   Tightening( 5, 2, Tightening::UB ),
-            Tightening( 6, 0, Tightening::LB ),   Tightening( 6, 5, Tightening::UB ),
-            Tightening( 7, 0, Tightening::LB ),   Tightening( 7, 1, Tightening::UB ),
-
-            Tightening( 8, -1, Tightening::LB ),  Tightening( 8, 7, Tightening::UB ),
-            Tightening( 9, -1, Tightening::LB ),  Tightening( 9, 5, Tightening::UB ),
-
-            Tightening( 10, -7, Tightening::LB ), Tightening( 10, 35, Tightening::UB ),
-
-            Tightening( 11, -35, Tightening::LB ), Tightening( 11, 7, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds, newBounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Invoke PMNR with random neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( {} );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( newBounds );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-        List<List<Tightening>> infeasibleBranches( {} );
-        List<List<Tightening>> expectedInfeasibleBranches( {} );
-        TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
-        TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
-    }
-
-    void test_pmnr_gradient_abs_and_relu()
+    void test_backward_abs_and_relu()
     {
         Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
         Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-gradient" );
+                                   "backward-converge" );
 
         NLR::NetworkLevelReasoner nlr;
         MockTableau tableau;
@@ -1633,7 +591,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
 
-        // Invoke PMNR with gradient-based heuristic for neuron selection
+        // Invoke backward LP propagation
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
@@ -1641,26 +599,18 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         List<Tightening> expectedBounds2( {
             Tightening( 10, 0, Tightening::LB ),
             Tightening( 11, 0, Tightening::LB ),
-
-            Tightening( 12, 0, Tightening::LB ),
-            Tightening( 13, 0, Tightening::LB ),
         } );
 
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( newBounds );
+        TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-        List<List<Tightening>> infeasibleBranches( {} );
-        List<List<Tightening>> expectedInfeasibleBranches( {} );
-        TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
-        TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
     }
 
-    void test_pmnr_gradient_round_and_sign()
+    void test_backward_round_and_sign()
     {
         Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
         Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-gradient" );
+                                   "backward-converge" );
 
         NLR::NetworkLevelReasoner nlr;
         MockTableau tableau;
@@ -1699,29 +649,23 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
 
-        // Invoke PMNR with gradient-based heuristic for neuron selection
+        // Invoke backward LP propagation
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
 
-        List<Tightening> expectedBounds2(
-            { Tightening( 9, -4.75, Tightening::LB ), Tightening( 9, 6.75, Tightening::UB ) } );
+        List<Tightening> expectedBounds2( {} );
 
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( newBounds );
+        TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-        List<List<Tightening>> infeasibleBranches( {} );
-        List<List<Tightening>> expectedInfeasibleBranches( {} );
-        TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
-        TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
     }
 
-    void test_pmnr_gradient_leaky_relu_and_sigmoid()
+    void test_backward_leaky_relu_and_sigmoid()
     {
         Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
         Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-gradient" );
+                                   "backward-converge" );
 
         NLR::NetworkLevelReasoner nlr;
         MockTableau tableau;
@@ -1760,7 +704,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
 
-        // Invoke PMNR with gradient-based heuristic for neuron selection
+        // Invoke backward LP propagation
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
@@ -1768,35 +712,18 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         List<Tightening> expectedBounds2( {
             Tightening( 6, -0.5, Tightening::LB ),
             Tightening( 7, -0.1, Tightening::LB ),
-            Tightening( 8, -1.5, Tightening::LB ),
-            Tightening( 8, 7.1, Tightening::UB ),
-            Tightening( 9, -1.1, Tightening::LB ),
-            Tightening( 9, 5.1, Tightening::UB ),
-            Tightening( 10, 0.0230, Tightening::LB ),
-            Tightening( 10, 0.9995, Tightening::UB ),
-            Tightening( 11, 0.1483, Tightening::LB ),
-            Tightening( 11, 0.9961, Tightening::UB ),
-            Tightening( 12, 0.0230, Tightening::LB ),
-            Tightening( 12, 0.9995, Tightening::UB ),
-            Tightening( 13, 0.4680, Tightening::LB ),
-            Tightening( 13, 3.9879, Tightening::UB ),
         } );
 
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( newBounds );
+        TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-        List<List<Tightening>> infeasibleBranches( {} );
-        List<List<Tightening>> expectedInfeasibleBranches( {} );
-        TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
-        TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
     }
 
-    void test_pmnr_gradient_softmax_and_max()
+    void test_backward_softmax_and_max()
     {
         Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
         Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-gradient" );
+                                   "backward-converge" );
 
         NLR::NetworkLevelReasoner nlr;
         MockTableau tableau;
@@ -1833,31 +760,23 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
 
-        // Invoke PMNR with gradient-based heuristic for neuron selection
+        // Invoke backward LP propagation
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
 
-        List<Tightening> expectedBounds2( {
-            Tightening( 8, -0.6812, Tightening::LB ),
-            Tightening( 8, 1.8414, Tightening::UB ),
-        } );
+        List<Tightening> expectedBounds2( {} );
 
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( newBounds );
+        TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-        List<List<Tightening>> infeasibleBranches( {} );
-        List<List<Tightening>> expectedInfeasibleBranches( {} );
-        TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
-        TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
     }
 
-    void test_pmnr_gradient_relu_and_bilinear()
+    void test_backward_relu_and_bilinear()
     {
         Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
         Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-gradient" );
+                                   "backward-converge" );
 
         NLR::NetworkLevelReasoner nlr;
         MockTableau tableau;
@@ -1894,7 +813,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
 
-        // Invoke PMNR with gradient-based heuristic for neuron selection
+        // Invoke backward LP propagation
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
@@ -1902,20 +821,14 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         List<Tightening> expectedBounds2( {} );
 
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( newBounds );
+        TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-
-        List<List<Tightening>> infeasibleBranches( {} );
-        List<List<Tightening>> expectedInfeasibleBranches( {} );
-        TS_ASSERT_THROWS_NOTHING( nlr.getInfeasibleBranches( infeasibleBranches ) );
-        TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
     }
 
-    void test_pmnr_bbps_abs_and_relu()
+    void test_pmnr_abs_and_relu()
     {
         Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-bbps" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" );
 
         NLR::NetworkLevelReasoner nlr;
         MockTableau tableau;
@@ -1954,7 +867,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
 
-        // Invoke PMNR with BBPS-based heuristic for neuron selection
+        // Invoke PMNR for neuron selection.
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
@@ -1969,7 +882,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         } );
 
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( newBounds );
+        TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
 
         List<List<Tightening>> infeasibleBranches( {} );
@@ -1978,11 +891,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
     }
 
-    void test_pmnr_bbps_round_and_sign()
+    void test_pmnr_round_and_sign()
     {
         Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-bbps" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" );
 
         NLR::NetworkLevelReasoner nlr;
         MockTableau tableau;
@@ -2021,7 +933,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
 
-        // Invoke PMNR with BBPS-based heuristic for neuron selection
+        // Invoke PMNR for neuron selection.
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
@@ -2030,7 +942,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
             { Tightening( 9, -4.75, Tightening::LB ), Tightening( 9, 6.75, Tightening::UB ) } );
 
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( newBounds );
+        TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
 
         List<List<Tightening>> infeasibleBranches( {} );
@@ -2039,11 +951,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
     }
 
-    void test_pmnr_bbps_leaky_relu_and_sigmoid()
+    void test_pmnr_leaky_relu_and_sigmoid()
    {
         Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-bbps" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" );
 
         NLR::NetworkLevelReasoner nlr;
         MockTableau tableau;
@@ -2082,7 +993,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
 
-        // Invoke PMNR with BBPS-based heuristic for neuron selection
+        // Invoke PMNR for neuron selection.
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
@@ -2094,18 +1005,18 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
             Tightening( 8, 7.1, Tightening::UB ),
             Tightening( 9, -1.1, Tightening::LB ),
             Tightening( 9, 5.1, Tightening::UB ),
-            Tightening( 10, 0.0269, Tightening::LB ),
-            Tightening( 10, 0.9995, Tightening::UB ),
-            Tightening( 11, 0.1696, Tightening::LB ),
-            Tightening( 11, 0.9960, Tightening::UB ),
-            Tightening( 12, 0.0269, Tightening::LB ),
-            Tightening( 12, 0.9995, Tightening::UB ),
-            Tightening( 13, 0.5358, Tightening::LB ),
-            Tightening( 13, 3.9875, Tightening::UB ),
+            Tightening( 10, 0.1541, Tightening::LB ),
+            Tightening( 10, 0.9992, Tightening::UB ),
+            Tightening( 11, 0.2422, Tightening::LB ),
+            Tightening( 11, 0.9942, Tightening::UB ),
+            Tightening( 12, 0.1541, Tightening::LB ),
+            Tightening( 12, 0.9992, Tightening::UB ),
+            Tightening( 13, 0.8827, Tightening::LB ),
+            Tightening( 13, 3.9817, Tightening::UB ),
         } );
 
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( newBounds );
+        TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
 
         List<List<Tightening>> infeasibleBranches( {} );
@@ -2114,11 +1025,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
     }
 
-    void test_pmnr_bbps_softmax_and_max()
+    void test_pmnr_softmax_and_max()
     {
         Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-bbps" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" );
 
         NLR::NetworkLevelReasoner nlr;
         MockTableau tableau;
@@ -2155,18 +1065,18 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
 
-        // Invoke PMNR with BBPS-based heuristic for neuron selection
+        // Invoke PMNR for neuron selection.
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
 
         List<Tightening> expectedBounds2( {
             Tightening( 8, -0.6812, Tightening::LB ),
-            Tightening( 8, 1.8414, Tightening::UB ),
+            Tightening( 8, 1.6790, Tightening::UB ),
         } );
 
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( newBounds );
+        TS_ASSERT_THROWS_NOTHING( bounds = removeRedundancies( newBounds, bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
 
         List<List<Tightening>> infeasibleBranches( {} );
@@ -2175,11 +1085,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
     }
 
-    void test_pmnr_bbps_relu_and_bilinear()
+    void test_pmnr_relu_and_bilinear()
     {
         Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-bbps" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" );
 
         NLR::NetworkLevelReasoner nlr;
         MockTableau tableau;
@@ -2216,7 +1125,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
         TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
 
-        // Invoke PMNR with BBPS-based heuristic for neuron selection
+        // Invoke PMNR for neuron selection.
         TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
         TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
         TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
@@ -2224,7 +1133,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         List<Tightening> expectedBounds2( {} );
 
         TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
-        bounds = removeRedundancies( newBounds );
+        bounds = removeRedundancies( newBounds, bounds );
         TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
 
         List<List<Tightening>> infeasibleBranches( {} );
@@ -2233,126 +1142,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         TS_ASSERT( infeasibleBranchesEqual( infeasibleBranches, expectedInfeasibleBranches ) );
     }
 
-    void test_pmnr_bbps_relu()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-bbps" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkMinimalReLU( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-
-        // Invoke Parameterised DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly() );
-
-        /*
-          Input ranges:
-
-          x0: [-1, 1]
-          x1: [-1, 1]
-
-          Layers 1, 2:
-
-          x2 = x0 + x1
-          x2.lb = x0 + x1 : [-2, 2]
-          x2.ub = x0 + x1 : [-2, 2]
-
-          x3 = x0 - x1
-          x3.lb = x0 - x1 : [-2, 2]
-          x3.ub = x0 - x1 : [-2, 2]
-
-          Both ReLUs are undecided, bounds are concretized. 2 = ub <= -lb = 2, using ReLU lower
-          coefficient of 0. Upper coefficient: 2/( 2 - (-2) ) = 2/4 = 0.5
-
-          0 <= x4 <= 0.5x2 + 1
-          x4.lb = 0
-          x4.ub = 0.5 ( x0 + x1 ) + 1 = 0.5x0 + 0.5x1 + 1
-          x4 range: [0, 2]
-
-          0 <= x5 <= 0.5x3 + 1
-          x5.lb = 0
-          x5.ub = 0.5 ( x0 - x1 ) + 1 = 0.5x0 - 0.5x1 + 1
-          x5 range: [0, 2]
-
-          Layers 3, 4:
-
-          x6 = x4 + 2x5
-          x6.lb = 1 ( 0 ) + 2 ( 0 ) = 0 : [0, 0]
-          x6.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) + 2 ( 0.5x0 - 0.5x1 + 1 ) = 1.5 x0 - 0.5 x1 + 3 : [1, 5]
-          x6 range: [0, 5]
-
-          x7 = x5 + 1.5
-          x7.lb = 1 ( 0 ) + 1.5 = 1.5 : [1.5, 1.5]
-          x7.ub = 1 ( 0.5x0 - 0.5x1 + 1 ) + 1.5 = 0.5x0 - 0.5x1 + 2.5 : [1.5, 3.5]
-          x7 range: [1.5, 3.5]
-
-          Both ReLUs are active, bounds survive the activation
-
-          x6 <= x8 <= x6
-          x8.lb = 0
-          x8.ub = 1.5 x0 - 0.5 x1 + 3
-          x8 range: [0, 5]
-
-          x7 <= x9 <= x7
-          x9.lb = 1.5
-          x9.ub = 0.5x0 - 0.5x1 + 2.5
-          x9 range: [1.5, 3.5]
-
-          Layer 5:
-          x10 = - x8 + x9 + 1
-          x10.lb = -1 ( x6 ) + 1 ( x7 ) + 1 = -1 ( x4 + 2x5 ) + 1 ( x5 + 1.5 ) + 1 = -x4 - x5 + 2.5
-          >= - ( 0.5x2 + 1 ) - ( 0.5x3 + 1 ) + 2.5 = -0.5x2 - 0.5x3 + 0.5 = -x0 + 0.5 >= -0.5 :
-          [-0.5, -0.5]
-          x10.ub = -1 ( x6 ) + 1 ( x7 ) + 1 = -1 ( x4 + 2x5 ) + 1 ( x5 + 1.5 ) + 1 = -x4 - x5 + 2.5
-          <= - ( 0 ) - ( 0 ) + 2.5 = 2.5 : [2.5, 2.5]
-          x10 range: [-0.5, 2.5]
-
-        */
-
-        List<Tightening> expectedBounds( {
-            Tightening( 2, -2, Tightening::LB ),
-            Tightening( 2, 2, Tightening::UB ),
-            Tightening( 3, -2, Tightening::LB ),
-            Tightening( 3, 2, Tightening::UB ),
-            Tightening( 4, 0, Tightening::LB ),
-            Tightening( 4, 2, Tightening::UB ),
-            Tightening( 5, 0, Tightening::LB ),
-            Tightening( 5, 2, Tightening::UB ),
-            Tightening( 6, 0, Tightening::LB ),
-            Tightening( 6, 5, Tightening::UB ),
-            Tightening( 7, 1.5, Tightening::LB ),
-            Tightening( 7, 3.5, Tightening::UB ),
-            Tightening( 8, 0, Tightening::LB ),
-            Tightening( 8, 5, Tightening::UB ),
-            Tightening( 9, 1.5, Tightening::LB ),
-            Tightening( 9, 3.5, Tightening::UB ),
-            Tightening( 10, -0.5, Tightening::LB ),
-            Tightening( 10, 2.5, Tightening::UB ),
-        } );
-
-        List<Tightening> bounds;
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
-
-        // Invoke PMNR with BBPS heuristic for neuron selection
-        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
-
-        List<Tightening> expectedBounds2( { Tightening( 10, 0.5000, Tightening::LB ) } );
-
-        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
-        TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );
-    }
-
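The deleted walkthrough above relies on the standard DeepPoly relaxation of an undecided ReLU. A minimal standalone sketch of the coefficient choice it describes (a hypothetical helper, not part of the suite; the zero lower slope when u <= -l is the heuristic stated in the comment):

    #include <utility>

    // DeepPoly relaxation of y = ReLU(x) for an undecided neuron with l < 0 < u.
    // The upper line passes through (l, 0) and (u, u):
    //     y <= lambda * x - lambda * l,   lambda = u / ( u - l ).
    // The lower line y >= s * x uses slope s = 0 when u <= -l, else s = 1.
    std::pair<double, double> reluRelaxationSlopes( double l, double u )
    {
        double lambda = u / ( u - l );
        double lowerSlope = ( u <= -l ) ? 0.0 : 1.0;
        return { lowerSlope, lambda };
    }
    // For l = -2, u = 2 this yields slopes { 0.0, 0.5 } and intercept
    // -lambda * l = 1, matching 0 <= x4 <= 0.5x2 + 1 in the walkthrough.
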
     bool boundsEqual( const List<Tightening> &bounds, const List<Tightening> &expectedBounds )
     {
@@ -2417,34 +1206,77 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         return allFound;
     }
 
-    // Create list of all tightenings in newBounds for which there is no bound in newBounds which is
+    // Create list of all tightenings in bounds for which there is no other bound which is
     // at least as tight.
-    List<Tightening> removeRedundancies( const List<Tightening> &newBounds )
+    List<Tightening> removeRedundancies( const List<Tightening> &bounds )
     {
         List<Tightening> minimalBounds;
         unsigned i = 0;
-        for ( const auto &newBound : newBounds )
+        for ( const auto &bound : bounds )
         {
             bool foundTighter = false;
             unsigned j = 0;
-            for ( const auto &bound : newBounds )
+            for ( const auto &otherBound : bounds )
            {
                 if ( i < j )
                 {
                     foundTighter |=
-                        ( newBound._type == bound._type && newBound._variable == bound._variable &&
-                          ( ( newBound._type == Tightening::LB &&
-                              FloatUtils::lte( newBound._value, bound._value, 0.0001 ) ) ||
-                            ( newBound._type == Tightening::UB &&
-                              FloatUtils::gte( newBound._value, bound._value, 0.0001 ) ) ) );
+                        ( bound._type == otherBound._type &&
+                          bound._variable == otherBound._variable &&
+                          ( ( bound._type == Tightening::LB &&
+                              FloatUtils::lte( bound._value, otherBound._value, 0.0001 ) ) ||
+                            ( bound._type == Tightening::UB &&
+                              FloatUtils::gte( bound._value, otherBound._value, 0.0001 ) ) ) );
                 }
                 ++j;
             }
             if ( !foundTighter )
-                minimalBounds.append( newBound );
+            {
+                minimalBounds.append( bound );
+            }
             ++i;
         }
         return minimalBounds;
     }
+
+    // Create list of all tightenings in newBounds for which there is no bound in newBounds
+    // or in bounds which is at least as tight.
+    List<Tightening> removeRedundancies( const List<Tightening> &newBounds,
+                                         const List<Tightening> &bounds )
+    {
+        List<Tightening> minimalBounds;
+        unsigned i = 0;
+        for ( const auto &bound : newBounds )
+        {
+            bool foundTighter = false;
+            unsigned j = 0;
+            for ( const auto &otherBound : newBounds )
+            {
+                if ( i < j )
+                {
+                    foundTighter |=
+                        ( bound._type == otherBound._type &&
+                          bound._variable == otherBound._variable &&
+                          ( ( bound._type == Tightening::LB &&
+                              FloatUtils::lte( bound._value, otherBound._value, 0.0001 ) ) ||
+                            ( bound._type == Tightening::UB &&
+                              FloatUtils::gte( bound._value, otherBound._value, 0.0001 ) ) ) );
                }
+                ++j;
+            }
+            for ( const auto &otherBound : bounds )
+            {
+                foundTighter |=
+                    ( bound._type == otherBound._type && bound._variable == otherBound._variable &&
+                      ( ( bound._type == Tightening::LB &&
+                          FloatUtils::lte( bound._value, otherBound._value, 0.0001 ) ) ||
+                        ( bound._type == Tightening::UB &&
+                          FloatUtils::gte( bound._value, otherBound._value, 0.0001 ) ) ) );
+            }
+            if ( !foundTighter )
+            {
+                minimalBounds.append( bound );
+            }
+            ++i;
+        }
+        return minimalBounds;
+    }
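Roughly, both overloads implement the same pairwise filter: a tightening is dropped when a later entry of the same list (or, in the two-argument overload, any entry of the baseline list) constrains the same variable in the same direction at least as tightly, within the 0.0001 tolerance; the `i < j` guard means each unordered pair is compared once, so on equal tightenings the last occurrence survives. A hypothetical usage sketch, with invented variable numbers, assuming the suite's brace-initializable List:

    // The looser ( 5, UB, 3.0 ) is dropped because the later
    // ( 5, UB, 2.0 ) is at least as tight; the 2.0 bound is kept.
    List<Tightening> raw( { Tightening( 5, 3.0, Tightening::UB ),
                            Tightening( 5, 2.0, Tightening::UB ) } );
    List<Tightening> minimal = removeRedundancies( raw );
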
diff --git a/src/nlr/tests/Test_PMNRSelection.h b/src/nlr/tests/Test_PMNRSelection.h
index 22bef3be40..e8dcce6d31 100644
--- a/src/nlr/tests/Test_PMNRSelection.h
+++ b/src/nlr/tests/Test_PMNRSelection.h
@@ -1449,44 +1449,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         compareNonfixedNeurons( nlr, Set<NLR::NeuronIndex>( { NLR::NeuronIndex( 2, 0 ) } ) );
     }
 
-    void test_gradient_selection_relus_active_and_not_fixed()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-gradient" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkSBTRelu( nlr, tableau );
-
-        tableau.setLowerBound( 0, 4 );
-        tableau.setUpperBound( 0, 6 );
-        tableau.setLowerBound( 1, 1 );
-        tableau.setUpperBound( 1, 5 );
-
-        // Strong negative bias for x2, which is node (1,0)
-        nlr.setBias( 1, 0, -15 );
-
-        // Invoke Parameterised DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) );
-
-        /*
-          Calculating Gradient-based PMNR score of x4:
-          Symbolic bounds of output layer in terms of Layer 2:
-          x4 - x5 <= x6 <= x4 - x5.
-          x4 gradient: lower = ( 1 ), upper = ( 1 ), average = ( 1 ).
-          Gradient-based PMNR score of x4: ( 1 )^2 = 1.
-        */
-        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 1 );
-    }
-
     void test_bbps_selection_relus_active_and_not_fixed()
     {
         Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-bbps" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" );
 
         NLR::NetworkLevelReasoner nlr;
         MockTableau tableau;
@@ -1528,15 +1494,16 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
 
             Lower branch, using x2: [-4, 0], 0 <= x4 <= 0:
             Output symbolic bounds -11 <= x6 <= -5.
-            Concrete bounds: [-11, -5]. DeepPoly bounds: [-9, 1]. Improvement: 6.
-
             Upper branch, using x2: [0, 12], x2 <= x4 <= x2:
             Output symbolic bounds x2 - 11 <= x6 <= x2 - 5.
-            Concrete bounds: [-11, 7]. DeepPoly bounds: [-9, 1]. Improvement: 0.
 
-            Final score = ( 6 + 0 ) / 2 = 3.
+            Summing over all branches:
+            Lower symbolic expression: x2 - 22 >= -26.
+            Upper symbolic expression: x2 - 10 <= 2.
+
+            Final score = ( 2 - (-26) ) / 2 = 14.
         */
-        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 3 );
+        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 14 );
     }
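The updated score in this hunk follows the new summed-branch recipe: sum the branch-wise output bounds into one lower and one upper symbolic expression, concretize them over the input box, and halve the width. Restating the comment's arithmetic with x2 in [-4, 12]:

    score = ( max(x2 - 10) - min(x2 - 22) ) / 2 = ( 2 - (-26) ) / 2 = 14.
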
     void test_symbolic_bound_maps_relus_active_and_externally_fixed()
@@ -1844,48 +1811,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         nlr, Set<NLR::NeuronIndex>( { NLR::NeuronIndex( 2, 0 ), NLR::NeuronIndex( 4, 0 ) } ) );
     }
 
-    void test_gradient_selection_relu_residual1()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-gradient" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkSBTReluResidual1( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-
-        // Invoke Parameterised DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) );
-
-        /*
-          Calculating Gradient-based PMNR score of x2:
-          Symbolic bounds of output layer in terms of Layer 2:
-          -3x2 - 2 <= x5 <= -2x2 + 10.
-          x2 gradient: lower = ( -3 ), upper = ( -2 ), average = ( -2.5 ).
-          Gradient-based PMNR score of x2: ( -2.5 )^2 = 6.25.
-        */
-        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 6.25 );
-
-        /*
-          Calculating Gradient-based PMNR score of x4:
-          Symbolic bounds of output layer in terms of Layer 4:
-          3x4 - 2 <= x5 <= 3x4 + 4.
-          x4 gradient: lower = ( 3 ), upper = ( 3 ), average = ( 3 ).
-          Gradient-based PMNR score of x4: ( 3 )^2 = 9.
-        */
-        comparePMNRScores( nlr, NLR::NeuronIndex( 4, 0 ), 9 );
-    }
-
     void test_bbps_selection_relu_residual1()
     {
         Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-bbps" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" );
 
         NLR::NetworkLevelReasoner nlr;
         MockTableau tableau;
@@ -1938,30 +1867,32 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
 
             Lower branch, using x1: [-1, 0], 0 <= x4 <= 0:
             Output symbolic bounds -2 <= x5 <= 10.
-            Concrete bounds: [-2, 10]. DeepPoly bounds: [1, 6]. Improvement: 0.
-
             Upper branch, using x1: [0, 1], x2 <= x4 <= x2:
             Output symbolic bounds -3x1 - 2 <= x5 <= -2x1 + 10.
-            Concrete bounds: [-5, 10]. DeepPoly bounds: [1, 6]. Improvement: 0.
 
-            Final score = ( 0 + 0 ) / 2 = 0.
+            Summing over all branches:
+            Lower symbolic expression: -3x1 - 4 >= -7.
+            Upper symbolic expression: -2x1 + 20 <= 22.
+
+            Final score = ( 22 - (-7) ) / 2 = 14.5.
         */
-        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 0 );
+        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 14.5 );
 
         /*
           Calculating BBPS-based PMNR score of x4:
           Symbolic bounds of output layer in terms of Layer 4:
          3x4 - 2 <= x5 <= 3x4 + 4.
            Lower branch, using x3: [-1, 0], 0 <= x4 <= 0:
             Output symbolic bounds -2 <= x5 <= 4.
-            Concrete bounds: [-2, 4]. DeepPoly bounds: [1, 6]. Improvement: 2.
-
             Upper branch, using x3: [0, 2], x2 <= x4 <= x2:
             Output symbolic bounds 3x3 - 2 <= x5 <= 3x3 + 4.
-            Concrete bounds: [-2, 10]. DeepPoly bounds: [1, 6]. Improvement: 0.
 
-            Final score = ( 0 + 2 ) / 2 = 1.
+            Summing over all branches:
+            Lower symbolic expression: -3x3 - 4 >= -7.
+            Upper symbolic expression: 3x3 + 8 <= 14.
+
+            Final score = ( 14 - (-7) ) / 2 = 10.5.
         */
-        comparePMNRScores( nlr, NLR::NeuronIndex( 4, 0 ), 1 );
+        comparePMNRScores( nlr, NLR::NeuronIndex( 4, 0 ), 10.5 );
     }
 
     void test_symbolic_bound_maps_relu_residual2()
@@ -2150,48 +2081,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         nlr, Set<NLR::NeuronIndex>( { NLR::NeuronIndex( 2, 0 ), NLR::NeuronIndex( 4, 0 ) } ) );
     }
 
-    void test_gradient_selection_relu_residual2()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-gradient" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkSBTReluResidual2( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-
-        // Invoke Parameterised DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) );
-
-        /*
-          Calculating Gradient-based PMNR score of x2:
-          Symbolic bounds of output layer in terms of Layer 2:
-          -3x2 + 2 <= x5 <= -2x2 + 6.
-          x4 gradient: lower = ( -3 ), upper = ( -2 ), average = ( -2.5 ).
-          Gradient-based PMNR score of x2: ( -2.5 )^2 = 6.25.
-        */
-        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 6.25 );
-
-        /*
-          Calculating Gradient-based PMNR score of x4:
-          Symbolic bounds of output layer in terms of Layer 4:
-          3x4 <= x5 <= 3x4 + 2.
-          x4 gradient: lower = ( 3 ), upper = ( 3 ), average = ( 3 ).
-          Gradient-based PMNR score of x4: ( 3 )^2 = 9.
-        */
-        comparePMNRScores( nlr, NLR::NeuronIndex( 4, 0 ), 9 );
-    }
-
     void test_bbps_selection_relu_residual2()
     {
         Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-bbps" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" );
 
         NLR::NetworkLevelReasoner nlr;
         MockTableau tableau;
@@ -2244,30 +2137,32 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
 
             Lower branch, using x1: [-1, 0], 0 <= x4 <= 0:
             Output symbolic bounds 2 <= x6 <= 6.
-            Concrete bounds: [2, 6]. DeepPoly bounds: [-1, 6]. Improvement: 3.
-
             Upper branch, using x1: [0, 1], x2 <= x4 <= x2:
             Output symbolic bounds -3x1 + 2 <= x6 <= -2x1 + 6.
-            Concrete bounds: [-1, 6]. DeepPoly bounds: [-1, 6]. Improvement: 0.
 
-            Final score = ( 3 + 0 ) / 2 = 1.5.
+            Summing over all branches:
+            Lower symbolic expression: -3x1 + 4 >= 1.
+            Upper symbolic expression: -2x1 + 12 <= 14.
+
+            Final score = ( 14 - 1 ) / 2 = 6.5.
         */
-        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 1.5 );
+        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 6.5 );
 
         /*
           Calculating BBPS-based PMNR score of x4:
          Symbolic bounds of output layer in terms of Layer 4:
           3x4 <= x6 <= 3x4 + 2.
-            Lower branch, using x3: [0, 1], 0 <= x4 <= 0:
+            Lower branch, using x3: [-1, 0], 0 <= x4 <= 0:
             Output symbolic bounds 0 <= x6 <= 2.
-            Concrete bounds: [0, 2]. DeepPoly bounds: [-1, 6]. Improvement: 5.
-
             Upper branch, using x3: [0, 2], x2 <= x4 <= x2:
             Output symbolic bounds 3x3 <= x6 <= 3x3 + 2.
-            Concrete bounds: [0, 8]. DeepPoly bounds: [-1, 6]. Improvement: 1.
 
-            Final score = ( 5 + 1 ) / 2 = 3.
+            Summing over all branches:
+            Lower symbolic expression: 3x3 >= -3.
+            Upper symbolic expression: 3x3 + 4 <= 10.
+
+            Final score = ( 10 - (-3) ) / 2 = 6.5.
         */
-        comparePMNRScores( nlr, NLR::NeuronIndex( 4, 0 ), 3 );
+        comparePMNRScores( nlr, NLR::NeuronIndex( 4, 0 ), 6.5 );
     }
 
     void test_symbolic_bound_maps_relu_reindex()
@@ -2479,59 +2374,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         NLR::NeuronIndex( 4, 0 ) } ) );
     }
 
-    void test_gradient_selection_relu_reindex()
-    {
-        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-gradient" );
-
-        NLR::NetworkLevelReasoner nlr;
-        MockTableau tableau;
-        nlr.setTableau( &tableau );
-        populateNetworkSBTReluReindex( nlr, tableau );
-
-        tableau.setLowerBound( 0, -1 );
-        tableau.setUpperBound( 0, 1 );
-        tableau.setLowerBound( 1, -1 );
-        tableau.setUpperBound( 1, 1 );
-
-        // Invoke Parameterised DeepPoly
-        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
-        TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) );
-
-        /*
-          Calculating Gradient-based PMNR score of x4:
-          Symbolic bounds of output layer in terms of Layer 2:
-          x4 + x5 + 1 <= x10 <= 1.5x4 + 0.5x5 + 2, 0 <= x11 <= 0.5x4 - 0.5x5 + 1.
-          x4 gradient: lower = ( 1, 0 ), upper = ( 1.5, 0.5 ), average = ( 1.25, 0.25 ).
-          Gradient-based PMNR score of x4: ( 1.25 )^2 + ( 0.25 )^2 = 1.625.
-        */
-        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 0.625 );
-
-        /*
-          Calculating Gradient-based PMNR score of x5:
-          Symbolic bounds of output layer in terms of Layer 2:
-          x4 + x5 + 1 <= x10 <= 1.5x4 + 0.5x5 + 2, 0 <= x11 <= 0.5x4 - 0.5x5 + 1.
-          x5 gradient: lower = ( 1, 0 ), upper = ( 0.5, -0.5 ), average = ( 0.75, -0.25 ).
-          Gradient-based PMNR score of x5: ( 0.75 )^2 + ( -0.25 )^2 = 0.625.
-        */
-        comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 1.625 );
-
-        /*
-          Calculating Gradient-based PMNR score of x9:
-          Symbolic bounds of output layer in terms of Layer 4:
-          x8 + x9 + 1 <= x10 <= x8 + x9 + 1, x9 <= x11 <= x9.
-          x9 gradient: lower = ( 1, 1 ), upper = ( 1, 1 ), average = ( 1, 1 ).
-          Gradient-based PMNR score of x9: ( 1 )^2 + ( 1 )^2 = 2.
-        */
-        comparePMNRScores( nlr, NLR::NeuronIndex( 4, 0 ), 2 );
-    }
-
     void test_bbps_selection_relu_reindex()
     {
         Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
-        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
-                                   "backward-pmnr-bbps" );
+        Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" );
 
         NLR::NetworkLevelReasoner nlr;
         MockTableau tableau;
@@ -2603,42 +2449,37 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
             x4 + x5 + 1 <= x10 <= 1.5x4 + 0.5x5 + 2, 0 <= x11 <= 0.5x4 - 0.5x5 + 1.
             Concretizing x5: x4 + 1 <= x10 <= 1.5x4 + 3, 0 <= x11 <= 0.5x4 + 1.
- Lower branch, using x2: [-2, 0], 0 <= x4 <= 0: Output symbolic bounds:
- 1 <= x10 <= 3, 0 <= x11 <= 1.
- Concrete bounds: x10: [1, 3], x11: [0, 1].
- DeepPoly bounds: x10: [1, 5.5], x11: [0, 2].
- Improvement: 2.5 + 1 = 3.5.
+ Lower branch, using x2: [-2, 0], 0 <= x4 <= 0.
+ Output symbolic bounds: 1 <= x10 <= 3, 0 <= x11 <= 1.
+ Upper branch, using x2: [0, 2], x2 <= x4 <= x2:
+ Output symbolic bounds: x2 + 1 <= x10 <= 1.5x2 + 3, 0 <= x11 <= 0.5x2 + 1.

- Upper branch, using x2: [0, 2], x2 <= x4 <= x2: Output symbolic bounds:
- x2 + 1 <= x10 <= 1.5x2 + 3, 0 <= x11 <= 0.5x2 + 1.
- Concrete bounds: x10: [1, 6], x11: [0, 2].
- DeepPoly bounds: x10: [1, 5.5], x11: [0, 2].
- Improvement: 0 + 0 = 0.
+ Summing over all branches and output neurons:
+ Lower symbolic expression: x2 + 2 >= 0.
+ Upper symbolic expression: 2x2 + 8 <= 12.

- Final score = ( 3.5 + 0 ) / 2 = 1.75.
+ Final score = ( 12 - 0 ) / 2 = 6.
*/
- comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 1.75 );
+ comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 6 );

/*
Calculating BBPS-based PMNR score of x5:
Symbolic bounds of output layer in terms of Layer 2:
x4 + x5 + 1 <= x10 <= 1.5x4 + 0.5x5 + 2, 0 <= x11 <= 0.5x4 - 0.5x5 + 1.
Concretizing x4: x5 + 1 <= x10 <= 0.5x5 + 5, 0 <= x11 <= -0.5x5 + 2.
- Lower branch, using x3: [-2, 0], 0 <= x5 <= 0: Output symbolic bounds:
- 1 <= x10 <= 5, 0 <= x11 <= 2.
- Concrete bounds: x10: [1, 5], x11: [0, 2].
- DeepPoly bounds: x10: [1, 5.5], x11: [0, 2].
- Improvement: 0.5 + 0 = 0.5.
-
- Upper branch, using x3: [0, 2], x3 <= x5 <= x3: Output symbolic bounds:
+ Lower branch, using x3: [-2, 0], 0 <= x5 <= 0:
+ Output symbolic bounds: 1 <= x10 <= 5, 0 <= x11 <= 2.
+ Upper branch, using x3: [0, 2], x3 <= x5 <= x3:
+ Output symbolic bounds:
x3 + 1 <= x10 <= 0.5x3 + 5, 0 <= x11 <= -0.5x3 + 2.
- Concrete bounds: x10: [1, 6], x11: [0, 2].
- DeepPoly bounds: x10: [1, 5.5], x11: [0, 2].
- Improvement: 0 + 0 = 0.

- Final score = ( 0.5 + 0 ) / 2 = 0.25.
+ Summing over all branches and output neurons:
+ Lower symbolic expression: x3 + 2 >= 0.
+ Upper symbolic expression: 14 <= 14.
+
+ Final score = ( 14 - 0 ) / 2 = 7.
*/
- comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 0.25 );
+ comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 7 );

/*
Calculating BBPS-based PMNR score of x9:
Symbolic bounds of output layer in terms of Layer 4:
@@ -2647,19 +2488,16 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
Lower branch, using x7: [-2, 0], 0 <= x9 <= 0: Output symbolic bounds:
1 <= x10 <= 4, 0 <= x11 <= 0.
- Concrete bounds: x10: [1, 4], x11: [0, 0].
- DeepPoly bounds: x10: [1, 5.5], x11: [0, 2].
- Improvement: 1.5 + 2 = 3.5.
-
Upper branch, using x7: [0, 2], x7 <= x9 <= x7: Output symbolic bounds:
x7 + 1 <= x10 <= x7 + 4, x7 <= x11 <= x7.
- Concrete bounds: x10: [1, 6], x11: [0, 2].
- DeepPoly bounds: x10: [1, 5.5], x11: [0, 2].
- Improvement: 0 + 0 = 0.

- Final score = ( 3.5 + 0 ) / 2 = 1.75.
+ Summing over all branches and output neurons:
+ Lower symbolic expression: 2x7 + 2 >= -2.
+ Upper symbolic expression: 2x7 + 8 <= 12.
+
+ Final score = ( 12 - (-2) ) / 2 = 7.
*/
- comparePMNRScores( nlr, NLR::NeuronIndex( 4, 0 ), 1.75 );
+ comparePMNRScores( nlr, NLR::NeuronIndex( 4, 0 ), 7 );
}

void test_symbolic_bound_maps_absolute_values_all_positive()
@@ -3062,44 +2900,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
compareNonfixedNeurons( nlr, Set( { NLR::NeuronIndex( 2, 0 ) } ) );
}

- void test_gradient_selection_absolute_values_positive_and_not_fixed()
- {
- Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
- Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
- "backward-pmnr-gradient" );
-
- NLR::NetworkLevelReasoner nlr;
- MockTableau tableau;
- nlr.setTableau( &tableau );
- populateNetworkSBTAbsoluteValue( nlr, tableau );
-
- tableau.setLowerBound( 0, 4 );
- tableau.setUpperBound( 0, 6 );
- tableau.setLowerBound( 1, 1 );
- tableau.setUpperBound( 1, 5 );
-
- // Strong negative bias for x2, which is node (1,0)
- nlr.setBias( 1, 0, -15 );
-
- // Invoke Parameterised DeepPoly
- TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
- TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) );
-
- /*
- Calculating Gradient-based PMNR score of x4:
- Symbolic bounds of output layer in terms of Layer 2:
- x4 - x5 <= x6 <= x4 - x5.
- x4 gradient: lower = ( 1 ), upper = ( 1 ), average = ( 1 ).
- Gradient-based PMNR score of x4: ( 1 )^2 = 1.
- */
- comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 1 );
- }
-
void test_bbps_selection_absolute_values_positive_and_not_fixed()
{
Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
- Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
- "backward-pmnr-bbps" );
+ Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" );

NLR::NetworkLevelReasoner nlr;
MockTableau tableau;
@@ -3141,15 +2945,16 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
Lower branch, using x2: [-4, 0], -x2 <= x4 <= -x2:
Output symbolic bounds -x2 - 11 <= x6 <= -x2 - 5.
- Concrete bounds: [-11, -1]. DeepPoly bounds: [-11, 7]. Improvement: 8.
-
Upper branch, using x2: [0, 12], x2 <= x4 <= x2:
Output symbolic bounds x2 - 11 <= x6 <= x2 - 5.
- Concrete bounds: [-11, 7]. DeepPoly bounds: [-11, 7]. Improvement: 0.
- Final score = ( 8 + 0 ) / 2 = 4.
+ Summing over all branches:
+ Lower symbolic expression: -22 >= -22.
+ Upper symbolic expression: -10 <= -10.
+
+ Final score = ( -10 - (-22) ) / 2 = 6.
*/
- comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 4 );
+ comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 6 );
}

void test_symbolic_bound_maps_absolute_values_active_and_externally_fixed()
@@ -3432,44 +3237,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
compareNonfixedNeurons( nlr, Set( { NLR::NeuronIndex( 2, 0 ) } ) );
}

- void test_gradient_selection_signs_positive_and_not_fixed()
- {
- Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
- Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
- "backward-pmnr-gradient" );
-
- NLR::NetworkLevelReasoner nlr;
- MockTableau tableau;
- nlr.setTableau( &tableau );
- populateNetworkSBTSign( nlr, tableau );
-
- tableau.setLowerBound( 0, 4 );
- tableau.setUpperBound( 0, 6 );
- tableau.setLowerBound( 1, 1 );
- tableau.setUpperBound( 1, 5 );
-
- // Strong negative bias for x2, which is node (1,0)
- nlr.setBias( 1, 0, -15 );
-
- // Invoke Parameterised DeepPoly
- TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
- TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) );
-
- /*
- Calculating Gradient-based PMNR score of x4:
- Symbolic bounds of output layer in terms of Layer 2:
- x4 - x5 <= x6 <= x4 - x5.
- x4 gradient: lower = ( 1 ), upper = ( 1 ), average = ( 1 ).
- Gradient-based PMNR score of x4: ( 1 )^2 = 1.
- */
- comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 1 );
- }
-
void test_bbps_selection_signs_positive_and_not_fixed()
{
Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
- Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
- "backward-pmnr-bbps" );
+ Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" );

NLR::NetworkLevelReasoner nlr;
MockTableau tableau;
@@ -3511,15 +3282,16 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
Lower branch, using x2: [-4, 0], -1 <= x4 <= -1:
Output symbolic bounds -2 <= x6 <= -2.
- Concrete bounds: [-2, -2]. DeepPoly bounds: [-2, 0]. Improvement: 2.
-
Upper branch, using x2: [0, 12], 1 <= x4 <= 1:
Output symbolic bounds 0 <= x6 <= 0.
- Concrete bounds: [0, 0]. DeepPoly bounds: [-2, 0]. Improvement: 2.
- Final score = ( 2 + 2 ) / 2 = 2.
+ Summing over all branches:
+ Lower symbolic expression: -2 >= -2.
+ Upper symbolic expression: -2 <= -2.
+
+ Final score = ( (-2) - (-2) ) / 2 = 0.
*/
- comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 2 );
+ comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 0 );
}

void test_symbolic_bound_maps_signs_active_and_externally_fixed()
@@ -3877,67 +3649,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
NLR::NeuronIndex( 4, 1 ) } ) );
}

- void test_gradient_selection_leaky_relu()
- {
- Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
- Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
- "backward-pmnr-gradient" );
- NLR::NetworkLevelReasoner nlr;
- MockTableau tableau;
- nlr.setTableau( &tableau );
- populateNetworkSBTLeakyReLU( nlr, tableau ); // alpha = 0.2
-
- tableau.setLowerBound( 0, -1 );
- tableau.setUpperBound( 0, 1 );
- tableau.setLowerBound( 1, -1 );
- tableau.setUpperBound( 1, 1 );
-
- // Invoke Parameterised DeepPoly
- TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
- TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) );
-
- /*
- Calculating Gradient-based PMNR score of x4:
- Symbolic bounds of output layer in terms of Layer 2:
- 2x4 + 1 <= x10 <= 19/15 x4 + 1/15 x5 + 229/75, x4 - x5 <= x11 <= 0.6x4 - 0.6x5 + 1.12.
- x4 gradient: lower = ( 2, 1 ), upper = ( 19/15, 0.6 ), average = ( 49/30, 0.8 ).
- Gradient-based PMNR score of x4: ( 49/30 )^2 + ( 0.8 )^2 = 3.3078.
- */
- comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 3.3078 );
-
- /*
- Calculating Gradient-based PMNR score of x5:
- Symbolic bounds of output layer in terms of Layer 2:
- 2x4 + 1 <= x10 <= 19/15 x4 + 1/15 x5 + 229/75, x4 - x5 <= x11 <= 0.6x4 - 0.6x5 + 1.12.
- x5 gradient: lower = ( 0, -1 ), upper = ( 1/15, -0.6 ), average = ( 1/30, -0.8 ).
- Gradient-based PMNR score of x5: ( 1/30 )^2 + ( 0.8 )^2 = 0.6411.
- */
- comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 0.6411 );
-
- /*
- Calculating Gradient-based PMNR score of x8:
- Symbolic bounds of output layer in terms of Layer 4:
- x8 + x9 + 1 <= x10 <= x8 + x9 + 1, x9 <= x11 <= x9.
- x8 gradient: lower = ( 1, 0 ), upper = ( 1, 0 ), average = ( 1, 0 ).
- Gradient-based PMNR score of x8: ( 1 )^2 + ( 0 )^2 = 1.
- */
- comparePMNRScores( nlr, NLR::NeuronIndex( 4, 0 ), 1 );
-
- /*
- Calculating Gradient-based PMNR score of x9:
- Symbolic bounds of output layer in terms of Layer 4:
- x8 + x9 + 1 <= x10 <= x8 + x9 + 1, x9 <= x11 <= x9.
- x9 gradient: lower = ( 1, 1 ), upper = ( 1, 1 ), average = ( 1, 1 ).
- Gradient-based PMNR score of x9: ( 1 )^2 + ( 1 )^2 = 2.
- */
- comparePMNRScores( nlr, NLR::NeuronIndex( 4, 1 ), 2 );
- }
-
void test_bbps_selection_leaky_relu()
{
Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
- Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
- "backward-pmnr-bbps" );
+ Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" );

NLR::NetworkLevelReasoner nlr;
MockTableau tableau;
@@ -4028,19 +3743,16 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
Lower branch, using x2: [-2, 0], 0.2 x2 <= x4 <= 0.2 x2: Output symbolic bounds:
0.4 x2 + 1 <= x10 <= 19/75 x2 + 239/75, 0.2 x2 - 2 <= x11 <= 0.12 x2 + 2.32.
- Concrete bounds: x10: [0.2, 239/75], x11: [-2.4, 2.32].
- DeepPoly bounds: x10: [-3, 5.64], x11: [-2.8, 2.8].
- Improvement: 424/75 + 0.88 = 490/75.
-
Upper branch, using x2: [0, 2], x2 <= x4 <= x2: Output symbolic bounds:
2x2 + 1 <= x10 <= 19/15 x2 + 239/75, x2 - 2 <= x11 <= 0.6x2 + 2.32.
- Concrete bounds: x10: [1, 5.72], x11: [-2, 3.52].
- DeepPoly bounds: x10: [-3, 5.64], x11: [-2.8, 2.8].
- Improvement: 4 + 0.8 = 4.8.
- Final score = ( 490/75 + 4.8 ) / 2 = 17/3 = 5.6667.
+ Summing over all branches and output neurons:
+ Lower symbolic expression: 3.6 x2 - 2 >= -9.2.
+ Upper symbolic expression: 2.24 x2 + 826/75 <= 1162/75.
+
+ Final score = ( 1162/75 - (-9.2) ) / 2 = 926/75 = 12.3467.
*/
- comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 5.6667 );
+ comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 12.3467 );

/*
Calculating BBPS-based PMNR score of x5:
Symbolic bounds of output layer in terms of Layer 2:
@@ -4049,19 +3761,16 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
Lower branch, using x3: [-2, 0], 0.2 x3 <= x5 <= 0.2 x3: Output symbolic bounds:
-3 <= x10 <= 1/75 x3 + 419/75, -0.2 x3 - 2 <= x11 <= -0.12 x3 + 2.32.
- Concrete bounds: x10: [-3, 419/75], x11: [-2, 2.56].
- DeepPoly bounds: x10: [-3, 5.64], x11: [-2.8, 2.8].
- Improvement: 4/75 + 1.04 = 82/75.
-
Upper branch, using x3: [0, 2], x3 <= x5 <= x3: Output symbolic bounds:
-3 <= x10 <= 1/15 x3 + 419/75, -x3 - 2 <= x11 <= -0.6x3 + 2.32.
- Concrete bounds: x10: [-3, 5.72], x11: [-4, 2.32].
- DeepPoly bounds: x10: [-3, 5.64], x11: [-2.8, 2.8].
- Improvement: 0 + 0.48 = 0.48.
-
- Final score = ( 82/75 + 0.48 ) / 2 = 59/75 = 0.7867.
+ Summing over all branches and output neurons:
+ Lower symbolic expression: -1.2 x3 - 10 >= -12.4.
+ Upper symbolic expression: -0.64 x3 + 1186/75 <= 1282/75.
+
+ Final score = ( 1282/75 - (-12.4) ) / 2 = 1106/75 = 14.7467.
*/
- comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 0.7867 );
+ comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 14.7467 );

/*
Calculating BBPS-based PMNR score of x8:
Symbolic bounds of output layer in terms of Layer 4:
@@ -4070,19 +3779,16 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
Lower branch, using x6: [-2, 0], 0.2 x6 <= x8 <= 0.2 x6: Output symbolic bounds:
0.2 x6 - 1.8 <= x10 <= 0.2 x6 + 3.8, -2.8 <= x11 <= 2.8.
- Concrete bounds: x10: [-2.2, 3.8], x11: [-2.8, 2.8].
- DeepPoly bounds: x10: [-3, 5.64], x11: [-2.8, 2.8].
- Improvement: 2.64 + 2.8 = 2.64.
-
Upper branch, using x6: [0, 2.8], x6 <= x8 <= x6: Output symbolic bounds:
x6 - 1.8 <= x10 <= x6 + 3.8, -2.8 <= x11 <= 2.8.
- Concrete bounds: x10: [-1.8, 6.6], x11: [-2.8, 2.8].
- DeepPoly bounds: x10: [-3, 5.64], x11: [-2.8, 2.8].
- Improvement: 1.2 + 0 = 1.2.
-
- Final score = ( 2.64 + 1.2 ) / 2 = 1.92.
+ Summing over all branches and output neurons:
+ Lower symbolic expression: 1.2 x6 - 9.2 >= -11.6.
+ Upper symbolic expression: 1.2 x6 + 13.2 <= 16.56.
+
+ Final score = ( 16.56 - (-11.6) ) / 2 = 14.08.
*/
- comparePMNRScores( nlr, NLR::NeuronIndex( 4, 0 ), 1.92 );
+ comparePMNRScores( nlr, NLR::NeuronIndex( 4, 0 ), 14.08 );

/*
Calculating BBPS-based PMNR score of x9:
Symbolic bounds of output layer in terms of Layer 4:
@@ -4091,19 +3797,16 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
Lower branch, using x7: [-2.8, 0], 0.2 x7 <= x9 <= 0.2 x7: Output symbolic bounds:
0.2 x7 - 1 <= x10 <= 0.2 x7 + 3.8, 0.2 x7 <= x11 <= 0.2 x7.
- Concrete bounds: x10: [-1.56, 3.8], x11: [-0.56, 0].
- DeepPoly bounds: x10: [-3, 5.64], x11: [-2.8, 2.8].
- Improvement: 3.28 + 5.04 = 8.32.
-
Upper branch, using x7: [0, 2.8], x7 <= x9 <= x7: Output symbolic bounds:
x7 - 1 <= x10 <= x7 + 3.8, x7 <= x11 <= x7.
- Concrete bounds: x10: [-1, 6.6], x11: [0, 2.8].
- DeepPoly bounds: x10: [-3, 5.64], x11: [-2.8, 2.8].
- Improvement: 2 + 2.8 = 4.8.
-
- Final score = ( 8.32 + 4.8 ) / 2 = 6.56.
+ Summing over all branches and output neurons:
+ Lower symbolic expression: 2.4 x7 - 2 >= -8.72.
+ Upper symbolic expression: 2.4 x7 + 7.6 <= 14.32.
+
+ Final score = ( 14.32 - (-8.72) ) / 2 = 11.52.
*/
- comparePMNRScores( nlr, NLR::NeuronIndex( 4, 1 ), 6.56 );
+ comparePMNRScores( nlr, NLR::NeuronIndex( 4, 1 ), 11.52 );
}

void test_symbolic_bound_maps_sigmoids_and_round()
@@ -4304,68 +4007,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
NLR::NeuronIndex( 4, 1 ) } ) );
}

- void test_gradient_selection_sigmoids_and_round()
- {
- Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
- Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
- "backward-pmnr-gradient" );
-
- NLR::NetworkLevelReasoner nlr;
- MockTableau tableau;
- nlr.setTableau( &tableau );
- populateNetworkSBTSigmoidsAndRound( nlr, tableau );
-
- tableau.setLowerBound( 0, -1 );
- tableau.setUpperBound( 0, 1 );
- tableau.setLowerBound( 1, -1 );
- tableau.setUpperBound( 1, 1 );
-
- // Invoke Parameterised DeepPoly
- TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
- TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) );
-
- /*
- Calculating Gradient-based PMNR score of x4:
- Symbolic bounds of output layer in terms of Layer 2:
- x4 + x5 - 0.5 <= x8 <= x4 + x5 + 0.5, x4 - x5 - 0.5 <= x9 <= x4 - x5 + 0.5.
- x4 gradient: lower = ( 1, 1 ), upper = ( 1, 1 ), average = ( 1, 1 ).
- Gradient-based PMNR score of x4: ( 1 )^2 + ( 1 )^2 = 2.
- */
- comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 2 );
-
- /*
- Calculating Gradient-based PMNR score of x5:
- Symbolic bounds of output layer in terms of Layer 2:
- x4 + x5 - 0.5 <= x8 <= x4 + x5 + 0.5, x4 - x5 - 0.5 <= x9 <= x4 - x5 + 0.5.
- x5 gradient: lower = ( 1, -1 ), upper = ( 1, -1 ), average = ( 1, -1 ).
- Gradient-based PMNR score of x5: ( 1 )^2 + ( -1 )^2 = 2.
- */
- comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 2 );
-
- /*
- Calculating Gradient-based PMNR score of x8:
- Symbolic bounds of output layer in terms of Layer 4:
- x8 <= x8 <= x8, x9 <= x9 <= x9.
- x8 gradient: lower = ( 1, 0 ), upper = ( 1, 0 ), average = ( 1, 0 ).
- Gradient-based PMNR score of x8: ( 1 )^2 + ( 0 )^2 = 1.
- */
- comparePMNRScores( nlr, NLR::NeuronIndex( 4, 0 ), 1 );
-
- /*
- Calculating Gradient-based PMNR score of x9:
- Symbolic bounds of output layer in terms of Layer 4:
- x8 <= x8 <= x8, x9 <= x9 <= x9.
- x9 gradient: lower = ( 1, 0 ), upper = ( 1, 0 ), average = ( 1, 0 ).
- Gradient-based PMNR score of x9: ( 1 )^2 + ( 0 )^2 = 1.
- */
- comparePMNRScores( nlr, NLR::NeuronIndex( 4, 1 ), 1 );
- }
-
void test_bbps_selection_sigmoids_and_round()
{
Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
- Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
- "backward-pmnr-bbps" );
+ Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" );

NLR::NetworkLevelReasoner nlr;
MockTableau tableau;
@@ -4381,17 +4026,17 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) );

- // Using branching point (x2, -2/11) for x4 (SIGMOID).
+ // Using branching point (x2, -2/101) for x4 (SIGMOID).
compareBBPSBranchingPoints(
nlr,
NLR::NeuronIndex( 2, 0 ),
- std::pair( { NLR::NeuronIndex( 1, 0 ), -0.1818 } ) );
+ std::pair( { NLR::NeuronIndex( 1, 0 ), -0.0198 } ) );

- // Using branching point (x3, -2/11) for x5 (SIGMOID).
+ // Using branching point (x3, -2/101) for x5 (SIGMOID).
compareBBPSBranchingPoints(
nlr,
NLR::NeuronIndex( 2, 1 ),
- std::pair( { NLR::NeuronIndex( 1, 1 ), -0.1818 } ) );
+ std::pair( { NLR::NeuronIndex( 1, 1 ), -0.0198 } ) );

// Using branching point (x6, 0.5) for x8 (ROUND).
compareBBPSBranchingPoints(
@@ -4422,19 +4067,20 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
return min(g_prime(l), g_prime(u))

l3 = l4 = -2
- u3 = u4 = -2/11
+ u3 = u4 = -2/101
l5 = l6 = g(-2)
- u5 = u6 = g(-2/11)
+ u5 = u6 = g(-2/101)
lambda7 = lam(l3, u3)
lambda7_prime = lam_prime(l3, u3)
lambda8 = lam(l4, u4)
lambda8_prime = lam_prime(l4, u4)

'''
- Sigmoid linear relaxation ( Layer 2 ), lower branches ( x2: [-2, 2/11], x3: [-2, 2/11]
- ): x4 >= lambda7_prime * x2 + ( g(l3) - lambda7_prime * l3 ) x4 <= lambda7 * x2 + ( g(u3) -
- lambda7 * u3 ) x5 >= lambda8_prime * x3 + ( g(l4) - lambda8_prime * l4 ) x5 <= lambda8
- * x3 + ( g(u4) - lambda8 * u4 )
+ Layer 2 Sigmoid linear relaxation, lower branches x2: [-2, -2/101], x3: [-2, -2/101]:
+ x4 >= lambda7_prime * x2 + ( g(l3) - lambda7_prime * l3 )
+ x4 <= lambda7 * x2 + ( g(u3) - lambda7 * u3 )
+ x5 >= lambda8_prime * x3 + ( g(l4) - lambda8_prime * l4 )
+ x5 <= lambda8 * x3 + ( g(u4) - lambda8 * u4 )
'''
print('------------------')
print(lambda7_prime)
@@ -4446,9 +4092,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
print(g(l4) - lambda8_prime * l4)
print(g(u4) - lambda8 * u4)

- l3 = l4 = -2/11
+ l3 = l4 = -2/101
u3 = u4 = 2
- l5 = l6 = g(-2/11)
+ l5 = l6 = g(-2/101)
u5 = u6 = g(2)
lambda7 = lam(l3, u3)
lambda7_prime = lam_prime(l3, u3)
@@ -4456,7 +4102,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
lambda8_prime = lam_prime(l4, u4)

'''
- Sigmoid linear relaxation ( Layer 2 ), upper branches ( x2: [2/11, 2], x3: [2/11, 2] ):
+ Layer 2 Sigmoid linear relaxation, upper branches x2: [-2/101, 2], x3: [-2/101, 2]:
x4 >= lambda7_prime * x2 + ( g(l3) - lambda7_prime * l3 )
x4 <= lambda7_prime * x2 + ( g(u3) - lambda7_prime * u3 )
x5 >= lambda8_prime * x3 + ( g(l4) - lambda8_prime * l4 )
@@ -4474,39 +4120,39 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
[output]:
------------------
0.1049935854035065
- 0.18450703649914935
+ 0.18980260606696492
0.1049935854035065
- 0.18450703649914935
+ 0.18980260606696492
0.3291900928291306
- 0.4882169950204162
+ 0.4988081341560474
0.3291900928291306
- 0.4882169950204162
+ 0.4988081341560474
------------------
0.10499358540350662
0.10499358540350662
- 0.47376000391211753
+ 0.49712874760825615
0.6708099071708691
- 0.47376000391211753
+ 0.49712874760825615
0.6708099071708691

- Lower branch symbolic bounds: 0.1050 x2 + 0.3292 <= x4 <= 0.1845 x2 + 0.4882.
- Upper branch symbolic bounds: 0.1050 x2 + 0.4737 <= x4 <= 0.1050 x2 + 0.6708.
+ Lower branch symbolic bounds: 0.1050 x2 + 0.3292 <= x4 <= 0.1898 x2 + 0.4988.
+ Upper branch symbolic bounds: 0.1050 x2 + 0.4971 <= x4 <= 0.1050 x2 + 0.6708.

- Lower branch symbolic bounds: 0.1050 x3 + 0.3292 <= x5 <= 0.1845 x3 + 0.4882.
- Upper branch symbolic bounds: 0.1050 x3 + 0.4737 <= x5 <= 0.1050 x3 + 0.6708.
+ Lower branch symbolic bounds: 0.1050 x3 + 0.3292 <= x5 <= 0.1898 x3 + 0.4988.
+ Upper branch symbolic bounds: 0.1050 x3 + 0.4971 <= x5 <= 0.1050 x3 + 0.6708.
*/
compareBranchSymbolicBounds( nlr,
NLR::NeuronIndex( 2, 0 ),
Vector( { 0.1050, 0.1050 } ),
- Vector( { 0.1845, 0.1050 } ),
- Vector( { 0.3292, 0.4737 } ),
- Vector( { 0.4882, 0.6708 } ) );
+ Vector( { 0.1898, 0.1050 } ),
+ Vector( { 0.3292, 0.4971 } ),
+ Vector( { 0.4988, 0.6708 } ) );
compareBranchSymbolicBounds( nlr,
NLR::NeuronIndex( 2, 1 ),
Vector( { 0.1050, 0.1050 } ),
- Vector( { 0.1845, 0.1050 } ),
- Vector( { 0.3292, 0.4737 } ),
- Vector( { 0.4882, 0.6708 } ) );
+ Vector( { 0.1898, 0.1050 } ),
+ Vector( { 0.3292, 0.4971 } ),
+ Vector( { 0.4988, 0.6708 } ) );

/*
Lower branch symbolic bounds: 0 <= x8 <= 0.
@@ -4537,68 +4183,61 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
Lower branch, using x2: [-2, -2/101], 0.1050 x2 + 0.3292 <= x4 <= 0.1898 x2 + 0.4988:
Output symbolic bounds:
- 0.1050 x2 - 0.0516 <= x8 <= 0.1845 x2 + 1.8690,
- 0.1050 x2 - 1.0516 <= x9 <= 0.1845 x2 + 0.8690.
- Concrete bounds: x8: [-0.2616, 1.8355], x9: [-1.2616, 0.8355].
- DeepPoly bounds: x8: [0, 2], x9: [-1, 1].
- Improvement: 0.1645 + 0.1645 = 0.3290.
-
+ 0.1050 x2 - 0.0516 <= x8 <= 0.1898 x2 + 1.8796,
+ 0.1050 x2 - 1.0516 <= x9 <= 0.1898 x2 + 0.8796.
Upper branch, using x2: [-2/101, 2], 0.1050 x2 + 0.4971 <= x4 <= 0.1050 x2 + 0.6708:
Output symbolic bounds:
- 0.1050 x2 + 0.0929 <= x8 <= 0.1050 x2 + 2.0516,
- 0.1050 x2 - 0.9071 <= x9 <= 0.1050 x2 + 1.0516.
- Concrete bounds: x8: [0.0738, 2.2616], x9: [-0.9262, 1.2616].
- DeepPoly bounds: x8: [0, 2], x9: [-1, 1].
- Improvement: 0.0738 + 0.0738 = 0.1476.
+ 0.1050 x2 + 0.1163 <= x8 <= 0.1050 x2 + 2.0516,
+ 0.1050 x2 - 0.8837 <= x9 <= 0.1050 x2 + 1.0516.
+
+ Summing over all branches and output neurons:
+ Lower symbolic expression: 0.4200 x2 - 1.8705 >= -2.7105.
+ Upper symbolic expression: 0.5896 x2 + 5.8624 <= 7.0416.

- Final score = ( 0.3290 + 0.1476 ) / 2 = 0.2384.
+ Final score = ( 7.0416 - (-2.7105) ) / 2 = 4.8761.
*/
- comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 0.2384 );
+ comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 4.8761 );

/*
Calculating BBPS-based PMNR score of x5:
Symbolic bounds of output layer in terms of Layer 2:
x4 + x5 - 0.5 <= x8 <= x4 + x5 + 0.5, x4 - x5 - 0.5 <= x9 <= x4 - x5 + 0.5.
Concretizing x4: x5 - 0.3808 <= x8 <= x5 + 1.3808, -x5 - 0.3808 <= x9 <= -x5 + 1.3808.
Lower branch, using x3: [-2, -2/101], 0.1050 x3 + 0.3292 <= x5 <= 0.1898 x3 + 0.4988:
Output symbolic bounds:
- 0.1050 x3 - 0.0516 <= x8 <= 0.1845 x3 + 1.8690,
- -0.1845 x3 - 0.8690 <= x9 <= -0.1050 x3 + 1.0516.
- Concrete bounds: x8: [-0.2616, 1.8355], x9: [-0.8355, 1.2616].
- DeepPoly bounds: x8: [0, 2], x9: [-1, 1].
- Improvement: 0.1645 + 0.1645 = 0.3290.
-
+ 0.1050 x3 - 0.0516 <= x8 <= 0.1898 x3 + 1.8796,
+ -0.1898 x3 - 0.8796 <= x9 <= -0.1050 x3 + 1.0516.
Upper branch, using x3: [-2/101, 2], 0.1050 x3 + 0.4971 <= x5 <= 0.1050 x3 + 0.6708:
Output symbolic bounds:
- 0.1050 x3 + 0.0929 <= x8 <= 0.1050 x3 + 2.0516,
- -0.1050 x3 - 1.0516 <= x9 <= -0.1050 x3 + 0.9071.
- Concrete bounds: x8: [0.0738, 2.2616], x9: [-1.1171, 0.9262].
- DeepPoly bounds: x8: [0, 2], x9: [-1, 1].
- Improvement: 0.0738 + 0.0738 = 0.1476.
+ 0.1050 x3 + 0.1163 <= x8 <= 0.1050 x3 + 2.0516,
+ -0.1050 x3 - 1.0516 <= x9 <= -0.1050 x3 + 0.8837.
+
+ Summing over all branches and output neurons:
+ Lower symbolic expression: -0.0848 x3 - 1.8665 >= -2.0361.
+ Upper symbolic expression: 0.0848 x3 + 5.8665 <= 6.0361.

- Final score = ( 0.3290 + 0.1476 ) / 2 = 0.2384.
+ Final score = ( 6.0361 - (-2.0361) ) / 2 = 4.0361. */ - comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 0.2384 ); + comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 4.0361 ); /* Calculating BBPS-based PMNR score of x8: Symbolic bounds of output layer in terms of Layer 4: x8 <= x8 <= x8, x9 <= x9 <= x9. Concretizing x9: x8 <= x8 <= x8, -1 <= x9 <= 1. - Lower branch, using x6: [0.4483, 0.5], 0 <= x8 <= 0: Output symbolic bounds: - 0 <= x8 <= 0, -1 <= x9 <= 1. - Concrete bounds: x8: [0, 0], x9: [-1, 1]. - DeepPoly bounds: x8: [0, 2], x9: [-1, 1]. - Improvement: 2. - + Lower branch, using x6: [0.4483, 0.5], 0 <= x8 <= 0: + Output symbolic bounds: 0 <= x8 <= 0, -1 <= x9 <= 1. Upper branch, using x6: [0.5, 1.5516], x6 - 0.5 <= x8 <= x6 + 0.5: Output symbolic bounds: x6 - 0.5 <= x8 <= x6 + 0.5, -1 <= x9 <= 1. - Concrete bounds: x8: [0, 2.0516], x9: [-1, 1]. - DeepPoly bounds: x8: [0, 2], x9: [-1, 1]. - Improvement: 0. - Final score = ( 2 + 0 ) / 2 = 1. + Summing over all branches and output neurons: + Lower symbolic expression: x6 - 2.5 >= -2.0517. + Upper symbolic expression: x6 + 2.5 <= 4.0517. + + Final score = ( 4.0517 - (-2.0517) ) / 2 = 3.0517. */ - comparePMNRScores( nlr, NLR::NeuronIndex( 4, 0 ), 1 ); + comparePMNRScores( nlr, NLR::NeuronIndex( 4, 0 ), 3.0517 ); /* Calculating BBPS-based PMNR score of x9: Symbolic bounds of output layer in terms of Layer 4: x8 <= x8 <= x8, x9 <= x9 <= x9. @@ -4606,19 +4245,16 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Lower branch, using x7: [-0.5516, -0.5], -1 <= x9 <= -1: Output symbolic bounds: 0 <= x8 <= 2, -1 <= x9 <= -1. - Concrete bounds: x8: [0, 0], x9: [-1, -1]. - DeepPoly bounds: x8: [0, 2], x9: [-1, 1]. - Improvement: 2. - Upper branch, using x7: [-0.5, 0.5516], x7 - 0.5 <= x9 <= x7 + 0.5: Output symbolic bounds: 0 <= x8 <= 2, x7 - 0.5 <= x9 <= x7 + 0.5. - Concrete bounds: x8: [0, 2], x9: [-1, 1.0516]. - DeepPoly bounds: x8: [0, 2], x9: [-1, 1]. - Improvement: 0. - Final score = ( 2 + 0 ) / 2 = 1. + Summing over all branches and output neurons: + Lower symbolic expression: x7 - 1.5 >= -2.0517. + Upper symbolic expression: x7 + 3.5 <= 4.0517. + + Final score = ( 4.0517 - (-2.0517) ) / 2 = 3.0517. */ - comparePMNRScores( nlr, NLR::NeuronIndex( 4, 1 ), 1 ); + comparePMNRScores( nlr, NLR::NeuronIndex( 4, 1 ), 3.0517 ); } void test_symbolic_bound_maps_max_not_fixed() @@ -4789,59 +4425,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite NLR::NeuronIndex( 3, 0 ) } ) ); } - void test_gradient_selection_max_not_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-pmnr-gradient" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTMax( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - // Invoke Parameterised DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); - - /* - Calculating Gradient-based PMNR score of x4: - Symbolic bounds of output layer in terms of Layer 2: - 2x5 <= x7 <= 6. - x4 gradient: lower = ( 0 ), upper = ( 0 ), average = ( 0 ). - Gradient-based PMNR score of x4: ( 0 )^2 = 0. 
- */
- comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 0 );
-
- /*
- Calculating Gradient-based PMNR score of x5:
- Symbolic bounds of output layer in terms of Layer 2:
- 2x5 <= x7 <= 6.
- x5 gradient: lower = ( 2 ), upper = ( 0 ), average = ( 1 ).
- Gradient-based PMNR score of x5: ( 1 )^2 = 1.
- */
- comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 1 );
-
- /*
- Calculating Gradient-based PMNR score of x6:
- Symbolic bounds of output layer in terms of Layer 3:
- 2x6 <= x7 <= 2x6.
- x6 gradient: lower = ( 2 ), upper = ( 2 ), average = ( 2 ).
- Gradient-based PMNR score of x6: ( 2 )^2 = 4.
- */
- comparePMNRScores( nlr, NLR::NeuronIndex( 3, 0 ), 4 );
- }
-
void test_bbps_selection_max_not_fixed()
{
Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
- Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE,
- "backward-pmnr-bbps" );
+ Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" );

NLR::NetworkLevelReasoner nlr;
MockTableau tableau;
@@ -4869,11 +4456,11 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
NLR::NeuronIndex( 2, 1 ),
std::pair( { NLR::NeuronIndex( 1, 1 ), 0 } ) );

- // Using branching point (x5, 6/11) for x6 (MAX).
+ // Using branching point (x5, 16/101) for x6 (MAX).
compareBBPSBranchingPoints(
nlr,
NLR::NeuronIndex( 3, 0 ),
- std::pair( { NLR::NeuronIndex( 2, 1 ), 0.5455 } ) );
+ std::pair( { NLR::NeuronIndex( 2, 1 ), 0.1584 } ) );

/*
Lower branch symbolic bounds: 0 <= x4 <= 0.
@@ -4922,45 +4509,48 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
Lower branch, using x2: [-2, 0], 0 <= x4 <= 0:
Output symbolic bounds 0 <= x6 <= 6.
- Concrete bounds: [0, 6]. DeepPoly bounds: [0, 6]. Improvement: 0.
-
Upper branch, using x2: [0, 3], x2 <= x4 <= x2:
Output symbolic bounds 0 <= x6 <= 6.
- Concrete bounds: [0, 6]. DeepPoly bounds: [0, 6]. Improvement: 0.
- Final score = ( 0 + 0 ) / 2 = 0.
+ Summing over all branches:
+ Lower symbolic expression: 0 >= 0.
+ Upper symbolic expression: 12 <= 12.
+
+ Final score = ( 12 - 0 ) / 2 = 6.
*/
- comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 0 );
+ comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 6 );

/*
Calculating BBPS-based PMNR score of x5:
Symbolic bounds of output layer in terms of Layer 2:
2x5 <= x7 <= 6.
Lower branch, using x3: [-3, 0], 0 <= x5 <= 0:
Output symbolic bounds 0 <= x6 <= 6.
- Concrete bounds: [0, 6]. DeepPoly bounds: [0, 6]. Improvement: 0.
-
Upper branch, using x3: [0, 2], x3 <= x5 <= x3:
- Output symbolic bounds 2x3 <= x6 <= x6.
- Concrete bounds: [0, 6]. DeepPoly bounds: [0, 6]. Improvement: 0.
+ Output symbolic bounds 2x3 <= x6 <= 6.
+
+ Summing over all branches:
+ Lower symbolic expression: 2x3 >= -6.
+ Upper symbolic expression: 12 <= 12.

- Final score = ( 0 + 0 ) / 2 = 0.
+ Final score = ( 12 - (-6) ) / 2 = 9.
*/
- comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 0 );
+ comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 9 );

/*
Calculating BBPS-based PMNR score of x6:
Symbolic bounds of output layer in terms of Layer 3:
2x6 <= x7 <= 2x6.
Lower branch, x5: [0, 16/101], using x5 <= x6 <= 3:
Output symbolic bounds 2x5 <= x6 <= 6.
- Concrete bounds: [0, 6]. DeepPoly bounds: [0, 6]. Improvement: 0.
-
Upper branch, x5: [16/101, 2], using x5 <= x6 <= 3:
Output symbolic bounds 2x5 <= x6 <= 6.
- Concrete bounds: [12/11, 6]. DeepPoly bounds: [0, 6]. Improvement: 12/11.
-
- Final score = ( 0 + 12/11 ) / 2 = 6/11.
+ Summing over all branches:
+ Lower symbolic expression: 4x5 >= 0.
+ Upper symbolic expression: 12 <= 12. + + Final score = ( 12 - 0 ) / 2 = 6. */ - comparePMNRScores( nlr, NLR::NeuronIndex( 3, 0 ), 0.5455 ); + comparePMNRScores( nlr, NLR::NeuronIndex( 3, 0 ), 6 ); } void test_symbolic_bound_maps_max_fixed() @@ -5645,66 +5235,11 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite } } - void test_gradient_selection_softmax2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-pmnr-gradient" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTSoftmax( nlr, tableau ); - - tableau.setLowerBound( 0, 1 ); - tableau.setUpperBound( 0, 1.000001 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 1.000001 ); - tableau.setLowerBound( 2, 1 ); - tableau.setUpperBound( 2, 1.000001 ); - - // Invoke Parameterised DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); - - /* - Calculating Gradient-based PMNR score of x6: - Symbolic bounds of output layer in terms of Layer 2: - x6 + x7 + x8 <= x9 <= x6 + x7 + x8, - -x6 - x7 - x8 <= x10 <= -x6 - x7 - x8. - x6 gradient: lower = ( 1, -1 ), upper = ( 1, -1 ), average = ( 1, -1 ). - Gradient-based PMNR score of x6: ( 1 )^2 + ( -1 )^2 = 2. - */ - comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 2 ); - - /* - Calculating Gradient-based PMNR score of x7: - Symbolic bounds of output layer in terms of Layer 2: - x6 + x7 + x8 <= x9 <= x6 + x7 + x8, - -x6 - x7 - x8 <= x10 <= -x6 - x7 - x8. - x7 gradient: lower = ( 1, -1 ), upper = ( 1, -1 ), average = ( 1, -1 ). - Gradient-based PMNR score of x7: ( 1 )^2 + ( -1 )^2 = 2. - */ - comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 2 ); - - /* - Calculating Gradient-based PMNR score of x8: - Symbolic bounds of output layer in terms of Layer 3: - x6 + x7 + x8 <= x9 <= x6 + x7 + x8, - -x6 - x7 - x8 <= x10 <= -x6 - x7 - x8. - x8 gradient: lower = ( 1, -1 ), upper = ( 1, -1 ), average = ( 1, -1 ). - Gradient-based PMNR score of x8: ( 1 )^2 + ( -1 )^2 = 2. - */ - comparePMNRScores( nlr, NLR::NeuronIndex( 2, 2 ), 2 ); - } - void test_bbps_selection_softmax2() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-pmnr-bbps" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" ); NLR::NetworkLevelReasoner nlr; MockTableau tableau; @@ -5779,9 +5314,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -x6 - x7 - x8 <= x10 <= -x6 - x7 - x8 Because the lower/upper symbolic bounds for output layer are equal (up to ~10^-6), - and lower/upper predecessor symbolic bounds for both branches are equal, the concrete + and lower/upper predecessor symbolic bounds for both branches are equal, the symbolic bounds for every output neuron, every nonfixed neuron and branch are equal to DeepPoly. - Consequently, the BBSP-based PMNR scores for all neurons is 0. + Consequently, the BBPS-based PMNR scores for all neurons equal 0. 
*/ comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 0 ); comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 0 ); @@ -6148,96 +5683,11 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite NLR::NeuronIndex( 2, 4 ) } ) ); } - void test_gradient_selection_softmax3() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-pmnr-gradient" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTSoftmax2( nlr, tableau ); - - tableau.setLowerBound( 0, 1 ); - tableau.setUpperBound( 0, 1.00001 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 1.00001 ); - tableau.setLowerBound( 2, 1 ); - tableau.setUpperBound( 2, 1.00001 ); - - // Invoke Parameterised DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); - - /* - Calculating Gradient-based PMNR score of x8: - Symbolic bounds of output layer in terms of Layer 2: - x8 + x10 + x12 <= x13 <= x8 + x10 + x12, - -x8 - x10 - x12 <= x14 <= -x8 - x10 - x12, - x9 + x11 <= x15 <= x9 + x11, - -x9 - x11 <= x16 <= -x9 - x11. - x8 gradient: lower = ( 1, -1, 0, 0 ), upper = ( 1, -1, 0, 0 ), avg. = ( 1, -1, 0, 0 ). - Gradient-based PMNR score of x8: ( 1 )^2 + ( -1 )^2 + ( 0 )^2 + ( 0 )^2 = 2. - */ - comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 2 ); - - /* - Calculating Gradient-based PMNR score of x9: - Symbolic bounds of output layer in terms of Layer 2: - x8 + x10 + x12 <= x13 <= x8 + x10 + x12, - -x8 - x10 - x12 <= x14 <= -x8 - x10 - x12, - x9 + x11 <= x15 <= x9 + x11, - -x9 - x11 <= x16 <= -x9 - x11. - x9 gradient: lower = ( 0, 0, 1, -1 ), upper = ( 0, 0, 1, -1 ), avg. = ( 0, 0, 1, -1 ). - Gradient-based PMNR score of x9: ( 0 )^2 + ( 0 )^2 + ( 1 )^2 + ( -1 )^2 = 2. - */ - comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 2 ); - - /* - Calculating Gradient-based PMNR score of x10: - Symbolic bounds of output layer in terms of Layer 2: - x8 + x10 + x12 <= x13 <= x8 + x10 + x12, - -x8 - x10 - x12 <= x14 <= -x8 - x10 - x12, - x9 + x11 <= x15 <= x9 + x11, - -x9 - x11 <= x16 <= -x9 - x11. - x10 gradient: lower = ( 1, -1, 0, 0 ), upper = ( 1, -1, 0, 0 ), avg. = ( 1, -1, 0, 0 ). - Gradient-based PMNR score of x10: ( 1 )^2 + ( -1 )^2 + ( 0 )^2 + ( 0 )^2 = 2. - */ - comparePMNRScores( nlr, NLR::NeuronIndex( 2, 2 ), 2 ); - - /* - Calculating Gradient-based PMNR score of x11: - Symbolic bounds of output layer in terms of Layer 2: - x8 + x10 + x12 <= x13 <= x8 + x10 + x12, - -x8 - x10 - x12 <= x14 <= -x8 - x10 - x12, - x9 + x11 <= x15 <= x9 + x11, - -x9 - x11 <= x16 <= -x9 - x11. - x11 gradient: lower = ( 0, 0, 1, -1 ), upper = ( 0, 0, 1, -1 ), avg. = ( 0, 0, 1, -1 ). - Gradient-based PMNR score of x11: ( 0 )^2 + ( 0 )^2 + ( 1 )^2 + ( -1 )^2 = 2. - */ - comparePMNRScores( nlr, NLR::NeuronIndex( 2, 3 ), 2 ); - - /* - Calculating Gradient-based PMNR score of x12: - Symbolic bounds of output layer in terms of Layer 2: - x8 + x10 + x12 <= x13 <= x8 + x10 + x12, - -x8 - x10 - x12 <= x14 <= -x8 - x10 - x12, - x9 + x11 <= x15 <= x9 + x11, - -x9 - x11 <= x16 <= -x9 - x11. - x12 gradient: lower = ( 1, -1, 0, 0 ), upper = ( 1, -1, 0, 0 ), avg. = ( 1, -1, 0, 0 ). - Gradient-based PMNR score of x12: ( 1 )^2 + ( -1 )^2 + ( 0 )^2 + ( 0 )^2 = 2. 
- */ - comparePMNRScores( nlr, NLR::NeuronIndex( 2, 4 ), 2 ); - } - void test_bbps_selection_softmax3() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-pmnr-bbps" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" ); NLR::NetworkLevelReasoner nlr; MockTableau tableau; @@ -6348,7 +5798,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Because the lower/upper symbolic bounds for output layer are equal (up to ~10^-6), and lower/upper predecessor symbolic bounds for both branches are equal, the concrete bounds for every output neuron, every nonfixed neuron and branch are equal to DeepPoly. - Consequently, the BBSP-based PMNR scores for all neurons is 0. + Consequently, the BBPS-based PMNR scores for all neurons equal 0. */ comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 0 ); comparePMNRScores( nlr, NLR::NeuronIndex( 2, 1 ), 0 ); @@ -6488,38 +5938,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite compareNonfixedNeurons( nlr, Set( { NLR::NeuronIndex( 2, 0 ) } ) ); } - void test_gradient_selection_bilinear() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-pmnr-gradient" ); - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTBilinear( nlr, tableau ); - - tableau.setLowerBound( 0, 1 ); - tableau.setUpperBound( 0, 2 ); - tableau.setLowerBound( 1, -2 ); - tableau.setUpperBound( 1, 1 ); - - // Invoke Parameterised DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); - /* - Calculating Gradient-based PMNR score of x4: - Symbolic bounds of output layer in terms of Layer 2: -x4 <= x5 <= -x4. - x4 gradient: lower = ( -1 ), upper = ( -1 ) - Gradient-based PMNR score of x4: ( ( -1 )^2 + ( -1 )^2 ) / 2 = 1. - */ - comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 1 ); - } - void test_bbps_selection_bilinear() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-pmnr-bbps" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-pmnr" ); NLR::NetworkLevelReasoner nlr; MockTableau tableau; @@ -6535,42 +5957,42 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.parameterisedDeepPoly( true ) ); - // Using branching point (x3, 1/3) for x4 (BILINEAR). + // Using branching point (x3, 0.4902) for x4 (BILINEAR). compareBBPSBranchingPoints( nlr, NLR::NeuronIndex( 2, 0 ), - std::pair( { NLR::NeuronIndex( 1, 1 ), 0.3333 } ) ); + std::pair( { NLR::NeuronIndex( 1, 1 ), 0.49016 } ) ); /* - Coefficients for bilinear layer (lower branch, x2: [-1, 6], x3: [-1, 1/3]): + Coefficients for bilinear layer (lower branch, x2: [-1, 6], x3: [-1, 0.49016]): Lower bound: alpha_l = x3.lb = -1 beta = x2.lb = -1 gamma_l = -x2.lb x3.lb = --1 * -1 = -1 Upper bound: - alpha_u = x3.ub = 1/3 + alpha_u = x3.ub = 0.49016 beta = x2.lb = -1 - gamma_u = -x2.lb x3.ub = --1 * 1/3 = 1/3 + gamma_u = -x2.lb x3.ub = --1 * 0.49016 = 0.49016 - -x2 - x3 - 1 <= x4 <= 1/3 x2 - x3 + 1/3. 
- Concretizing x2: -x3 - 7 <= x4 <= -x3 + 7/3.
+ -x2 - x3 - 1 <= x4 <= 0.49016 x2 - x3 + 0.49016.
+ Concretizing x2: -x3 - 7 <= x4 <= -x3 + 3.4314.

- Coefficients for bilinear layer (upper branch, x2: [-1, 6], x3: [1/3, 3]):
+ Coefficients for bilinear layer (upper branch, x2: [-1, 6], x3: [0.49016, 3]):
Lower bound:
- alpha_l = x3.lb = 1/3
+ alpha_l = x3.lb = 0.49016
beta = x2.lb = -1
- gamma_l = -x2.lb x3.lb = --1 * 1/3 = 1/3
+ gamma_l = -x2.lb x3.lb = --1 * 0.49016 = 0.49016

Upper bound:
alpha_u = x3.ub = 3
beta = x2.lb = -1
gamma_u = -x2.lb x3.ub = --1 * 3 = 3

- 1/3 x2 - x3 + 1/3 <= x4 <= 3x2 - x3 + 3.
+ 0.49016 x2 - x3 + 0.49016 <= x4 <= 3x2 - x3 + 3.
Concretizing x2: -x3 <= x4 <= -x3 + 21.

- Lower branch symbolic bounds: -x3 - 7 <= x4 <= -x3 + 7/3.
+ Lower branch symbolic bounds: -x3 - 7 <= x4 <= -x3 + 3.4314.
Upper branch symbolic bounds: -x3 <= x4 <= -x3 + 21.
*/
compareBranchSymbolicBounds( nlr,
@@ -6578,22 +6000,23 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
Vector( { -1, -1 } ),
Vector( { -1, -1 } ),
Vector( { -7, 0 } ),
- Vector( { 2.3333, 21 } ) );
+ Vector( { 3.4314, 21 } ) );

/*
Calculating BBPS-based PMNR score of x4:
Symbolic bounds of output layer in terms of Layer 2: -x4 <= x5 <= -x4.
- Lower branch, using x3: [-1, 1/3], -x3 - 7 <= x4 <= -x3 + 7/3:
- Output symbolic bounds x3 - 7/3 <= x6 <= x3 + 7.
- Concrete bounds: [-10/3, 22/3]. DeepPoly bounds: [-18, 6]. Improvement: 44/3.
-
+ Lower branch, using x3: [-1, 0.49016], -x3 - 7 <= x4 <= -x3 + 3.4314:
+ Output symbolic bounds x3 - 3.4314 <= x6 <= x3 + 7.
Upper branch, using x3: [0.49016, 3], -x3 <= x4 <= -x3 + 21:
Output symbolic bounds x3 - 21 <= x6 <= x3.
- Concrete bounds: [-62/3, 3]. DeepPoly bounds: [-18, 6]. Improvement: 3.
-
- Final score = ( 44/3 + 3 ) / 2 = 53/6 = 8.8333.
+ Summing over all branches:
+ Lower symbolic expression: 2x3 - 24.4314 >= -26.4314.
+ Upper symbolic expression: 2x3 + 7 <= 13.
+
+ Final score = ( 13 - (-26.4314) ) / 2 = 19.7157.
*/
- comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 8.8333 );
+ comparePMNRScores( nlr, NLR::NeuronIndex( 2, 0 ), 19.7157 );
}

void test_parameterised_symbolic_bound_maps_relus_all_active()

From faee9d2499c22811a0fdd5b3c8037a0f7d5ad238 Mon Sep 17 00:00:00 2001
From: ido-shm-uel
Date: Fri, 5 Dec 2025 21:07:47 +0200
Subject: [PATCH 81/81] Three fixes: 1. Limiting PMNR max iterations to 10. 2.
 Removing unused function declarations from LPFormulator.h. 3. Removing
 redundant 'include LPFormulator.h' from NetworkLevelReasoner.h to solve
 Release build failure on clang++.
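As a reading aid for fix 1, here is a minimal, self-contained sketch (not Marabou code) of the bounded-iteration pattern the commit message describes. The names performSymbolicPass, performMilpPass and MAX_ROUNDS are hypothetical stand-ins for performSymbolicBoundTightening, performMILPSolverBoundedTightening and GlobalConfiguration::MAX_ROUNDS_OF_PMNR_BACKWARD_ANALYSIS in the hunk below. The old loop condition tested the constant itself, which is always non-zero, so only reaching a fixed point could stop the loop; counting rounds caps it at ten.

#include <cstdio>

static const unsigned MAX_ROUNDS = 10; // stand-in for the global configuration constant

// Hypothetical stand-in: returns how many bounds the symbolic pass tightened.
static unsigned performSymbolicPass()
{
    static unsigned remaining = 3;
    return remaining > 0 ? remaining-- : 0;
}

// Hypothetical stand-in for the MILP-based tightening pass.
static void performMilpPass()
{
}

int main()
{
    unsigned iter = 1;
    unsigned tightened = performSymbolicPass();
    // Stop when a pass tightens nothing, or once the round cap is reached.
    while ( tightened && iter < MAX_ROUNDS )
    {
        performMilpPass();
        tightened = performSymbolicPass();
        ++iter;
    }
    printf( "Stopped after %u round(s)\n", iter );
    return 0;
}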
--- src/engine/Engine.cpp | 6 ++++-- src/nlr/LPFormulator.h | 3 --- src/nlr/NetworkLevelReasoner.h | 1 - 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/src/engine/Engine.cpp b/src/engine/Engine.cpp index b220c00cac..b15e636fc1 100644 --- a/src/engine/Engine.cpp +++ b/src/engine/Engine.cpp @@ -1676,15 +1676,17 @@ void Engine::performAdditionalBackwardAnalysisIfNeeded() if ( _milpSolverBoundTighteningType == MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PMNR ) { + unsigned iter = 1; unsigned tightened = performSymbolicBoundTightening( &( *_preprocessedQuery ) ); if ( _verbosity > 0 ) printf( "Backward analysis tightened %u bounds\n", tightened ); - while ( tightened && GlobalConfiguration::MAX_ROUNDS_OF_PMNR_BACKWARD_ANALYSIS ) + while ( tightened && iter < GlobalConfiguration::MAX_ROUNDS_OF_PMNR_BACKWARD_ANALYSIS ) { performMILPSolverBoundedTightening( &( *_preprocessedQuery ) ); tightened = performSymbolicBoundTightening( &( *_preprocessedQuery ) ); if ( _verbosity > 0 ) printf( "Backward analysis tightened %u bounds\n", tightened ); + ++iter; } } } @@ -4084,4 +4086,4 @@ Engine::analyseExplanationDependencies( const SparseUnsortedList &explanation, } return entries; -} \ No newline at end of file +} diff --git a/src/nlr/LPFormulator.h b/src/nlr/LPFormulator.h index 7d0737b635..b983a48267 100644 --- a/src/nlr/LPFormulator.h +++ b/src/nlr/LPFormulator.h @@ -59,9 +59,6 @@ class LPFormulator : public ParallelSolver Map>(), const Vector &polygonalTightenings = Vector( {} ) ); - void optimizeBoundsWithPreimageApproximation( Map &layers ); - void optimizeBoundsWithInvprop( Map &layers ); - void optimizeBoundsWithPMNR( Map &layers ); void optimizeBoundsOfOneLayerWithLpRelaxation( const Map &layers, unsigned targetIndex ); void optimizeBoundsWithIncrementalLpRelaxation( const Map &layers ); diff --git a/src/nlr/NetworkLevelReasoner.h b/src/nlr/NetworkLevelReasoner.h index fd285551fd..e9dd4aa106 100644 --- a/src/nlr/NetworkLevelReasoner.h +++ b/src/nlr/NetworkLevelReasoner.h @@ -18,7 +18,6 @@ #include "DeepPolyAnalysis.h" #include "ITableau.h" -#include "LPFormulator.h" #include "Layer.h" #include "LayerOwner.h" #include "Map.h"